| repo_name (string, 5-100 chars) | path (string, 4-294 chars) | copies (990 classes) | size (string, 4-7 chars) | content (string, 666 chars-1M) | license (15 classes) |
|---|---|---|---|---|---|
Versent/ansible | v1/ansible/utils/su_prompts.py | 113 | 1576 | # -*- coding: utf-8 -*-
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import re
SU_PROMPT_LOCALIZATIONS = [
'Password',
'암호',
'パスワード',
'Adgangskode',
'Contraseña',
'Contrasenya',
'Hasło',
'Heslo',
'Jelszó',
'Lösenord',
'Mật khẩu',
'Mot de passe',
'Parola',
'Parool',
'Pasahitza',
'Passord',
'Passwort',
'Salasana',
'Sandi',
'Senha',
'Wachtwoord',
'ססמה',
'Лозинка',
'Парола',
'Пароль',
'गुप्तशब्द',
'शब्दकूट',
'సంకేతపదము',
'හස්පදය',
'密码',
'密碼',
]
SU_PROMPT_LOCALIZATIONS_RE = re.compile("|".join([r"(\w+'s )?" + x + ' ?: ?' for x in SU_PROMPT_LOCALIZATIONS]), flags=re.IGNORECASE)
def check_su_prompt(data):
return bool(SU_PROMPT_LOCALIZATIONS_RE.match(data))
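# --- Illustrative usage (not part of the original module). A minimal, hedged
# sketch: the prompt strings below are made-up terminal output, chosen to
# exercise the optional "<user>'s " prefix and the localized prompt words,
# which are matched case-insensitively at the start of the line.
if __name__ == '__main__':
    assert check_su_prompt("Password: ")
    assert check_su_prompt("root's Password:")
    assert check_su_prompt("Mot de passe : ")
    assert not check_su_prompt("login: ")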
| gpl-3.0 |
jerpat/csmake | csmake/test-TestPython-source/tests/test_failing_example.py | 2 | 1430 | # <copyright>
# (c) Copyright 2017 Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# </copyright>
import unittest
from example import example
from subdir.another_test_example import subdir_test_example
class test_example(unittest.TestCase):
def setUp(self):
self.options = self.csmake_test.options
self.log = self.csmake_test.log
def test_example_pass(self):
e = example()
self.assertEqual(e.myfunc(), 5)
# Several of the tests below are intentionally broken (a wrong expected
# value, an unimported name): this file, test_failing_example.py, exercises
# csmake's handling of failing tests.
def test_example_fail(self):
e = example()
self.assertEqual(e.myfunc(), 4)
def test_example_useoption(self):
e = subdir_test_example()
self.assertEqual(str(e.myfunc()), self.options['option-answer'])
def test_another_example(self):
e = another_example()
self.assertEqual(str(e.do()), 3)
| gpl-3.0 |
uday1889/gensim | gensim/interfaces.py | 33 | 10470 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
This module contains basic interfaces used throughout the whole gensim package.
The interfaces are realized as abstract base classes (i.e., some optional functionality
is provided in the interface itself, so that the interfaces can be subclassed).
"""
from __future__ import with_statement
import logging
import itertools
from gensim import utils, matutils
from six.moves import xrange
logger = logging.getLogger('gensim.interfaces')
class CorpusABC(utils.SaveLoad):
"""
Interface (abstract base class) for corpora. A *corpus* is simply an iterable,
where each iteration step yields one document:
>>> for doc in corpus:
>>> # do something with the doc...
A document is a sequence of `(fieldId, fieldValue)` 2-tuples:
>>> for attr_id, attr_value in doc:
>>> # do something with the attribute
Note that although a default :func:`len` method is provided, it is very inefficient
(performs a linear scan through the corpus to determine its length). Wherever
the corpus size is needed and known in advance (or at least doesn't change so
that it can be cached), the :func:`len` method should be overridden.
See the :mod:`gensim.corpora.svmlightcorpus` module for an example of a corpus.
Saving the corpus with the `save` method (inherited from `utils.SaveLoad`) will
only store the *in-memory* (binary, pickled) object representation, i.e. the stream
state, and **not** the documents themselves. See the `save_corpus` static method
for serializing the actual stream content.
"""
def __iter__(self):
"""
Iterate over the corpus, yielding one document at a time.
"""
raise NotImplementedError('cannot instantiate abstract base class')
def save(self, *args, **kwargs):
import warnings
warnings.warn("corpus.save() stores only the (tiny) iteration object; "
"to serialize the actual corpus content, use e.g. MmCorpus.serialize(corpus)")
super(CorpusABC, self).save(*args, **kwargs)
def __len__(self):
"""
Return the number of documents in the corpus.
This method is just the least common denominator and should really be
overridden when possible.
"""
raise NotImplementedError("must override __len__() before calling len(corpus)")
# logger.warning("performing full corpus scan to determine its length; was this intended?")
# return sum(1 for doc in self) # sum(empty generator) == 0, so this works even for an empty corpus
@staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
"""
Save an existing `corpus` to disk.
Some formats also support saving the dictionary (`feature_id->word` mapping),
which can in this case be provided by the optional `id2word` parameter.
>>> MmCorpus.save_corpus('file.mm', corpus)
Some corpora also support an index of where each document begins, so
that the documents on disk can be accessed in O(1) time (see the
`corpora.IndexedCorpus` base class). In this case, `save_corpus` is automatically
called internally by `serialize`, which does `save_corpus` plus saves the index
at the same time, so you want to store the corpus with::
>>> MmCorpus.serialize('file.mm', corpus) # stores index as well, allowing random access to individual documents
Calling `serialize()` is preferred to calling `save_corpus()`.
"""
raise NotImplementedError('cannot instantiate abstract base class')
# example code:
logger.info("converting corpus to ??? format: %s" % fname)
with utils.smart_open(fname, 'wb') as fout:
for doc in corpus: # iterate over the document stream
fmt = str(doc) # format the document appropriately...
fout.write(utils.to_utf8("%s\n" % fmt)) # serialize the formatted document to disk
#endclass CorpusABC
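# --- Illustrative sketch (not part of gensim). A minimal in-memory corpus
# honouring the CorpusABC contract documented above: iteration yields one
# document at a time, each document being a sequence of (field_id, value)
# 2-tuples, and __len__ is overridden because the size is known up front.
# The class name and sample data are hypothetical.
class ToyCorpus(CorpusABC):
    def __init__(self, docs):
        self.docs = docs  # e.g. [[(0, 1.0), (3, 2.0)], [(1, 0.5)]]

    def __iter__(self):
        for doc in self.docs:
            yield doc

    def __len__(self):
        return len(self.docs)  # cheap, so avoid the default linear scan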
class TransformedCorpus(CorpusABC):
def __init__(self, obj, corpus, chunksize=None):
self.obj, self.corpus, self.chunksize = obj, corpus, chunksize
self.metadata = False
def __len__(self):
return len(self.corpus)
def __iter__(self):
if self.chunksize:
for chunk in utils.grouper(self.corpus, self.chunksize):
for transformed in self.obj.__getitem__(chunk, chunksize=None):
yield transformed
else:
for doc in self.corpus:
yield self.obj[doc]
#endclass TransformedCorpus
class TransformationABC(utils.SaveLoad):
"""
Interface for transformations. A 'transformation' is any object which accepts
a sparse document via the dictionary notation `[]` and returns another sparse
document in its stead::
>>> transformed_doc = transformation[doc]
or also::
>>> transformed_corpus = transformation[corpus]
See the :mod:`gensim.models.tfidfmodel` module for an example of a transformation.
"""
def __getitem__(self, vec):
"""
Transform vector from one vector space into another
**or**
Transform a whole corpus into another.
"""
raise NotImplementedError('cannot instantiate abstract base class')
def _apply(self, corpus, chunksize=None):
"""
Apply the transformation to a whole corpus (as opposed to a single document)
and return the result as another corpus.
"""
return TransformedCorpus(self, corpus, chunksize)
#endclass TransformationABC
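# --- Illustrative sketch (not part of gensim). A toy transformation that
# follows the TransformationABC contract above: `self[doc]` maps one sparse
# document to another, while `self[corpus]` defers to _apply() so the whole
# corpus is wrapped lazily in a TransformedCorpus. The class name and the
# scaling rule are hypothetical; utils.is_corpus is the same helper already
# used elsewhere in this module.
class ScaleTransformation(TransformationABC):
    def __init__(self, factor):
        self.factor = factor

    def __getitem__(self, vec):
        is_corpus, vec = utils.is_corpus(vec)
        if is_corpus:
            return self._apply(vec)  # lazy, corpus-level application
        return [(term_id, weight * self.factor) for term_id, weight in vec]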
class SimilarityABC(utils.SaveLoad):
"""
Abstract interface for similarity searches over a corpus.
In all instances, there is a corpus against which we want to perform the
similarity search.
For each similarity search, the input is a document and the output are its
similarities to individual corpus documents.
Similarity queries are realized by calling ``self[query_document]``.
There is also a convenience wrapper, where iterating over `self` yields
similarities of each document in the corpus against the whole corpus (i.e.,
the query is each corpus document in turn).
"""
def __init__(self, corpus):
raise NotImplementedError("cannot instantiate Abstract Base Class")
def get_similarities(self, doc):
# (Sparse)MatrixSimilarity override this method so that they both use the
# same __getitem__ method, defined below
raise NotImplementedError("cannot instantiate Abstract Base Class")
def __getitem__(self, query):
"""Get similarities of document `query` to all documents in the corpus.
**or**
If `query` is a corpus (iterable of documents), return a matrix of similarities
of all query documents vs. all corpus documents. Using this type of batch
query is more efficient than computing the similarities one document after
another.
"""
is_corpus, query = utils.is_corpus(query)
if self.normalize:
# self.normalize only works if the input is a plain gensim vector/corpus (as
# advertised in the doc). in fact, input can be a numpy or scipy.sparse matrix
# as well, but in that case assume tricks are happening and don't normalize
# anything (self.normalize has no effect).
if matutils.ismatrix(query):
import warnings
# warnings.warn("non-gensim input must already come normalized")
else:
if is_corpus:
query = [matutils.unitvec(v) for v in query]
else:
query = matutils.unitvec(query)
result = self.get_similarities(query)
if self.num_best is None:
return result
# if the input query was a corpus (=more documents), compute the top-n
# most similar for each document in turn
if matutils.ismatrix(result):
return [matutils.full2sparse_clipped(v, self.num_best) for v in result]
else:
# otherwise, return top-n of the single input document
return matutils.full2sparse_clipped(result, self.num_best)
def __iter__(self):
"""
For each index document, compute cosine similarity against all other
documents in the index and yield the result.
"""
# turn off query normalization (vectors in the index are assumed to be already normalized)
norm = self.normalize
self.normalize = False
# Try to compute similarities in bigger chunks of documents (not
# one query = a single document after another). The point is, a
# bigger query of N documents is faster than N small queries of one
# document.
#
# After computing similarities of the bigger query in `self[chunk]`,
# yield the resulting similarities one after another, so that it looks
# exactly the same as if they had been computed with many small queries.
try:
chunking = self.chunksize > 1
except AttributeError:
# chunking not supported; fall back to the (slower) mode of 1 query=1 document
chunking = False
if chunking:
# assumes `self.corpus` holds the index as a 2-d numpy array.
# this is true for MatrixSimilarity and SparseMatrixSimilarity, but
# may not be true for other (future) classes..?
for chunk_start in xrange(0, self.index.shape[0], self.chunksize):
# scipy.sparse doesn't allow slicing beyond real size of the matrix
# (unlike numpy). so, clip the end of the chunk explicitly to make
# scipy.sparse happy
chunk_end = min(self.index.shape[0], chunk_start + self.chunksize)
chunk = self.index[chunk_start : chunk_end]
if chunk.shape[0] > 1:
for sim in self[chunk]:
yield sim
else:
yield self[chunk]
else:
for doc in self.index:
yield self[doc]
# restore old normalization value
self.normalize = norm
#endclass SimilarityABC
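# --- Illustrative usage (not part of gensim). A hedged sketch of the query
# semantics described above, against a concrete subclass such as
# similarities.MatrixSimilarity; `index`, `query_doc` and `corpus` are
# placeholders, not real objects:
#
#   sims = index[query_doc]      # similarities of one document vs. the index
#   sims_matrix = index[corpus]  # batch query: one row per query document
#   for sims in index:           # each index document vs. the whole index
#       pass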
| gpl-3.0 |
felixjimenez/django | tests/select_related/models.py | 114 | 1906 | """
41. Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument controls how many "levels"
the select-related behavior will traverse.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Who remembers high school biology?
@python_2_unicode_compatible
class Domain(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Kingdom(models.Model):
name = models.CharField(max_length=50)
domain = models.ForeignKey(Domain)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Phylum(models.Model):
name = models.CharField(max_length=50)
kingdom = models.ForeignKey(Kingdom)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Klass(models.Model):
name = models.CharField(max_length=50)
phylum = models.ForeignKey(Phylum)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Order(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Family(models.Model):
name = models.CharField(max_length=50)
order = models.ForeignKey(Order)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Genus(models.Model):
name = models.CharField(max_length=50)
family = models.ForeignKey(Family)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Species(models.Model):
name = models.CharField(max_length=50)
genus = models.ForeignKey(Genus)
def __str__(self):
return self.name
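# --- Illustrative usage (not part of the test suite). A hedged sketch of
# what these models exercise: fetching the whole taxonomy chain in a single
# query via select_related(). The lookup value is hypothetical.
#
#   s = Species.objects.select_related(
#       'genus__family__order__klass__phylum__kingdom__domain'
#   ).get(name='sapiens')
#   s.genus.family.order.klass.phylum.kingdom.domain.name  # no extra queries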
| bsd-3-clause |
ysekky/GPy | GPy/util/mocap.py | 6 | 27949 | import os
import numpy as np
import math
from GPy.util import datasets as dat
class vertex:
def __init__(self, name, id, parents=None, children=None, meta=None):
# Use None defaults: mutable default arguments ([] and {}) would be shared
# across every vertex instance that relies on the default.
self.name = name
self.id = id
self.parents = parents if parents is not None else []
self.children = children if children is not None else []
self.meta = meta if meta is not None else {}
def __str__(self):
return self.name + '(' + str(self.id) + ').'
class tree:
def __init__(self):
self.vertices = []
self.vertices.append(vertex(name='root', id=0))
def __str__(self):
index = self.find_root()
return self.branch_str(index)
def branch_str(self, index, indent=''):
out = indent + str(self.vertices[index]) + '\n'
for child in self.vertices[index].children:
out+=self.branch_str(child, indent+' ')
return out
def find_children(self):
"""Take a tree and set the children according to the parents.
Takes a tree structure which lists the parents of each vertex
and computes the children for each vertex and places them in."""
for i in range(len(self.vertices)):
self.vertices[i].children = []
for i in range(len(self.vertices)):
for parent in self.vertices[i].parents:
if i not in self.vertices[parent].children:
self.vertices[parent].children.append(i)
def find_parents(self):
"""Take a tree and set the parents according to the children
Takes a tree structure which lists the children of each vertex
and computes the parents for each vertex and places them in."""
for i in range(len(self.vertices)):
self.vertices[i].parents = []
for i in range(len(self.vertices)):
for child in self.vertices[i].children:
if i not in self.vertices[child].parents:
self.vertices[child].parents.append(i)
def find_root(self):
"""Finds the index of the root node of the tree."""
self.find_parents()
index = 0
while len(self.vertices[index].parents)>0:
index = self.vertices[index].parents[0]
return index
def get_index_by_id(self, id):
"""Give the index associated with a given vertex id."""
for i in range(len(self.vertices)):
if self.vertices[i].id == id:
return i
raise ValueError('Reverse look up of id failed.')
def get_index_by_name(self, name):
"""Give the index associated with a given vertex name."""
for i in range(len(self.vertices)):
if self.vertices[i].name == name:
return i
raise ValueError('Reverse look up of name failed.')
def order_vertices(self):
"""Order vertices in the graph such that parents always have a lower index than children."""
ordered = False
while not ordered:
for i in range(len(self.vertices)):
ordered = True
for parent in self.vertices[i].parents:
if parent>i:
ordered = False
self.swap_vertices(i, parent)
def swap_vertices(self, i, j):
"""
Swap two vertices in the tree structure array.
swap_vertex swaps the location of two vertices in a tree structure array.
:param tree: the tree for which two vertices are to be swapped.
:param i: the index of the first vertex to be swapped.
:param j: the index of the second vertex to be swapped.
:rval tree: the tree structure with the two vertex locations swapped.
"""
store_vertex_i = self.vertices[i]
store_vertex_j = self.vertices[j]
self.vertices[j] = store_vertex_i
self.vertices[i] = store_vertex_j
for k in range(len(self.vertices)):
for swap_list in [self.vertices[k].children, self.vertices[k].parents]:
if i in swap_list:
swap_list[swap_list.index(i)] = -1
if j in swap_list:
swap_list[swap_list.index(j)] = i
if -1 in swap_list:
swap_list[swap_list.index(-1)] = j
def rotation_matrix(xangle, yangle, zangle, order='zxy', degrees=False):
"""
Compute the rotation matrix for an angle in each direction.
This is a helper function for computing the rotation matrix for a given set of angles in a given order.
:param xangle: rotation for x-axis.
:param yangle: rotation for y-axis.
:param zangle: rotation for z-axis.
:param order: the order for the rotations.
"""
if degrees:
xangle = math.radians(xangle)
yangle = math.radians(yangle)
zangle = math.radians(zangle)
# Here we assume we rotate z, then x then y.
c1 = math.cos(xangle) # The x angle
c2 = math.cos(yangle) # The y angle
c3 = math.cos(zangle) # the z angle
s1 = math.sin(xangle)
s2 = math.sin(yangle)
s3 = math.sin(zangle)
# see http://en.wikipedia.org/wiki/Rotation_matrix for
# additional info.
if order=='zxy':
rot_mat = np.array([[c2*c3-s1*s2*s3, c2*s3+s1*s2*c3, -s2*c1],[-c1*s3, c1*c3, s1],[s2*c3+c2*s1*s3, s2*s3-c2*s1*c3, c2*c1]])
else:
rot_mat = np.eye(3)
for i in range(len(order)):
if order[i]=='x':
rot_mat = np.dot(np.array([[1, 0, 0], [0, c1, s1], [0, -s1, c1]]),rot_mat)
elif order[i] == 'y':
rot_mat = np.dot(np.array([[c2, 0, -s2], [0, 1, 0], [s2, 0, c2]]),rot_mat)
elif order[i] == 'z':
rot_mat = np.dot(np.array([[c3, s3, 0], [-s3, c3, 0], [0, 0, 1]]),rot_mat)
return rot_mat
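# --- Illustrative sanity check (not part of GPy). Under the row-vector
# convention used above, a 90-degree rotation about z maps the x-axis onto
# the y-axis; the values are only a hedged sketch.
#
#   R = rotation_matrix(0., 0., 90., order='z', degrees=True)
#   np.dot(np.array([1., 0., 0.]), R)  # -> approximately [0., 1., 0.]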
# Motion capture data routines.
class skeleton(tree):
def __init__(self):
tree.__init__(self)
def connection_matrix(self):
connection = np.zeros((len(self.vertices), len(self.vertices)), dtype=bool)
for i in range(len(self.vertices)):
for j in range(len(self.vertices[i].children)):
connection[i, self.vertices[i].children[j]] = True
return connection
def to_xyz(self, channels):
raise NotImplementedError("this needs to be implemented to use the skeleton class")
def finalize(self):
"""After loading in a skeleton ensure parents are correct, vertex orders are correct and rotation matrices are correct."""
self.find_parents()
self.order_vertices()
self.set_rotation_matrices()
def smooth_angle_channels(self, channels):
"""Remove discontinuities in angle channels so that they don't cause artifacts in algorithms that rely on the smoothness of the functions."""
for vertex in self.vertices:
for col in vertex.meta['rot_ind']:
if col:
for k in range(1, channels.shape[0]):
diff=channels[k, col]-channels[k-1, col]
if abs(diff+360.)<abs(diff):
channels[k:, col]=channels[k:, col]+360.
elif abs(diff-360.)<abs(diff):
channels[k:, col]=channels[k:, col]-360.
# class bvh_skeleton(skeleton):
# def __init__(self):
# skeleton.__init__(self)
# def to_xyz(self, channels):
class acclaim_skeleton(skeleton):
def __init__(self, file_name=None):
skeleton.__init__(self)
self.documentation = []
self.angle = 'deg'
self.length = 1.0
self.mass = 1.0
self.type = 'acclaim'
self.vertices[0] = vertex(name='root', id=0,
parents = [0], children=[],
meta = {'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)})
if file_name:
self.load_skel(file_name)
def to_xyz(self, channels):
rot_val = list(self.vertices[0].meta['orientation'])
for i in range(len(self.vertices[0].meta['rot_ind'])):
rind = self.vertices[0].meta['rot_ind'][i]
if rind != -1:
rot_val[i] += channels[rind]
self.vertices[0].meta['rot'] = rotation_matrix(rot_val[0],
rot_val[1],
rot_val[2],
self.vertices[0].meta['axis_order'],
degrees=True)
# vertex based store of the xyz location
self.vertices[0].meta['xyz'] = list(self.vertices[0].meta['offset'])
for i in range(len(self.vertices[0].meta['pos_ind'])):
pind = self.vertices[0].meta['pos_ind'][i]
if pind != -1:
self.vertices[0].meta['xyz'][i] += channels[pind]
for i in range(len(self.vertices[0].children)):
ind = self.vertices[0].children[i]
self.get_child_xyz(ind, channels)
xyz = []
for vertex in self.vertices:
xyz.append(vertex.meta['xyz'])
return np.array(xyz)
def get_child_xyz(self, ind, channels):
parent = self.vertices[ind].parents[0]
children = self.vertices[ind].children
rot_val = np.zeros(3)
for j in range(len(self.vertices[ind].meta['rot_ind'])):
rind = self.vertices[ind].meta['rot_ind'][j]
if rind != -1:
rot_val[j] = channels[rind]
else:
rot_val[j] = 0
tdof = rotation_matrix(rot_val[0], rot_val[1], rot_val[2],
self.vertices[ind].meta['order'],
degrees=True)
torient = rotation_matrix(self.vertices[ind].meta['axis'][0],
self.vertices[ind].meta['axis'][1],
self.vertices[ind].meta['axis'][2],
self.vertices[ind].meta['axis_order'],
degrees=True)
torient_inv = rotation_matrix(-self.vertices[ind].meta['axis'][0],
-self.vertices[ind].meta['axis'][1],
-self.vertices[ind].meta['axis'][2],
self.vertices[ind].meta['axis_order'][::-1],
degrees=True)
self.vertices[ind].meta['rot'] = np.dot(np.dot(np.dot(torient_inv,tdof),torient),self.vertices[parent].meta['rot'])
self.vertices[ind].meta['xyz'] = self.vertices[parent].meta['xyz'] + np.dot(self.vertices[ind].meta['offset'],self.vertices[ind].meta['rot'])
for i in range(len(children)):
cind = children[i]
self.get_child_xyz(cind, channels)
def load_channels(self, file_name):
fid=open(file_name, 'r')
channels = self.read_channels(fid)
fid.close()
return channels
def save_channels(self, file_name, channels):
with open(file_name, 'w') as fid:
self.write_channels(fid, channels)
def load_skel(self, file_name):
"""
Loads an ASF file into a skeleton structure.
:param file_name: The file name to load in.
"""
fid = open(file_name, 'r')
self.read_skel(fid)
fid.close()
self.name = file_name
def read_bonedata(self, fid):
"""Read bone data from an acclaim skeleton file stream."""
bone_count = 0
lin = self.read_line(fid)
while lin[0]!=':':
parts = lin.split()
if parts[0] == 'begin':
bone_count += 1
self.vertices.append(vertex(name = '', id=np.NaN,
meta={'name': [],
'id': [],
'offset': [],
'orientation': [],
'axis': [0., 0., 0.],
'axis_order': [],
'C': np.eye(3),
'Cinv': np.eye(3),
'channels': [],
'bodymass': [],
'confmass': [],
'order': [],
'rot_ind': [],
'pos_ind': [],
'limits': [],
'xyz': np.array([0., 0., 0.]),
'rot': np.eye(3)}))
lin = self.read_line(fid)
elif parts[0]=='id':
self.vertices[bone_count].id = int(parts[1])
lin = self.read_line(fid)
self.vertices[bone_count].children = []
elif parts[0]=='name':
self.vertices[bone_count].name = parts[1]
lin = self.read_line(fid)
elif parts[0]=='direction':
direction = np.array([float(parts[1]), float(parts[2]), float(parts[3])])
lin = self.read_line(fid)
elif parts[0]=='length':
lgth = float(parts[1])
lin = self.read_line(fid)
elif parts[0]=='axis':
self.vertices[bone_count].meta['axis'] = np.array([float(parts[1]),
float(parts[2]),
float(parts[3])])
# order is reversed compared to bvh
self.vertices[bone_count].meta['axis_order'] = parts[-1][::-1].lower()
lin = self.read_line(fid)
elif parts[0]=='dof':
order = []
for i in range(1, len(parts)):
if parts[i]== 'rx':
chan = 'Xrotation'
order.append('x')
elif parts[i] =='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i] == 'rz':
chan = 'Zrotation'
order.append('z')
elif parts[i] == 'tx':
chan = 'Xposition'
elif parts[i] == 'ty':
chan = 'Yposition'
elif parts[i] == 'tz':
chan = 'Zposition'
elif parts[i] == 'l':
chan = 'length'
self.vertices[bone_count].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[bone_count].meta['order'] = order[::-1]
lin = self.read_line(fid)
elif parts[0]=='limits':
self.vertices[bone_count].meta['limits'] = [[float(parts[1][1:]), float(parts[2][:-1])]]
lin = self.read_line(fid)
while lin !='end':
parts = lin.split()
self.vertices[bone_count].meta['limits'].append([float(parts[0][1:]), float(parts[1][:-1])])
lin = self.read_line(fid)
self.vertices[bone_count].meta['limits'] = np.array(self.vertices[bone_count].meta['limits'])
elif parts[0]=='end':
self.vertices[bone_count].meta['offset'] = direction*lgth
lin = self.read_line(fid)
return lin
def read_channels(self, fid):
"""Read channels from an acclaim file."""
bones = [[] for i in self.vertices]
num_channels = 0
for vertex in self.vertices:
num_channels = num_channels + len(vertex.meta['channels'])
lin = self.read_line(fid)
while lin != ':DEGREES':
lin = self.read_line(fid)
if lin == '':
raise ValueError('Could not find :DEGREES in ' + fid.name)
counter = 0
lin = self.read_line(fid)
while lin:
parts = lin.split()
if len(parts)==1:
frame_no = int(parts[0])
if frame_no:
counter += 1
if counter != frame_no:
raise ValueError('Unexpected frame number.')
else:
raise ValueError('Single bone name ...')
else:
ind = self.get_index_by_name(parts[0])
bones[ind].append(np.array([float(channel) for channel in parts[1:]]))
lin = self.read_line(fid)
num_frames = counter
channels = np.zeros((num_frames, num_channels))
end_val = 0
for i in range(len(self.vertices)):
vertex = self.vertices[i]
if len(vertex.meta['channels'])>0:
start_val = end_val
end_val = end_val + len(vertex.meta['channels'])
for j in range(num_frames):
channels[j, start_val:end_val] = bones[i][j]
self.resolve_indices(i, start_val)
self.smooth_angle_channels(channels)
return channels
def write_channels(self, fid, channels):
fid.write('#!OML:ASF \n')
fid.write(':FULLY-SPECIFIED\n')
fid.write(':DEGREES\n')
num_frames = channels.shape[0]
for i_frame in range(num_frames):
fid.write(str(i_frame+1)+'\n')
offset = 0
for vertex in self.vertices:
fid.write(vertex.name+' '+ ' '.join([str(v) for v in channels[i_frame,offset:offset+len(vertex.meta['channels'])]])+'\n')
offset += len(vertex.meta['channels'])
def read_documentation(self, fid):
"""Read documentation from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
self.documentation.append(lin)
lin = self.read_line(fid)
return lin
def read_hierarchy(self, fid):
"""Read hierarchy information from acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin != 'end':
parts = lin.split()
if lin != 'begin':
ind = self.get_index_by_name(parts[0])
for i in range(1, len(parts)):
self.vertices[ind].children.append(self.get_index_by_name(parts[i]))
lin = self.read_line(fid)
lin = self.read_line(fid)
return lin
def read_line(self, fid):
"""Read a line from a file string and check it isn't either empty or commented before returning."""
lin = '#'
while lin[0] == '#':
lin = fid.readline().strip()
if lin == '':
return lin
return lin
def read_root(self, fid):
"""Read the root node from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
parts = lin.split()
if parts[0]=='order':
order = []
for i in range(1, len(parts)):
if parts[i].lower()=='rx':
chan = 'Xrotation'
order.append('x')
elif parts[i].lower()=='ry':
chan = 'Yrotation'
order.append('y')
elif parts[i].lower()=='rz':
chan = 'Zrotation'
order.append('z')
elif parts[i].lower()=='tx':
chan = 'Xposition'
elif parts[i].lower()=='ty':
chan = 'Yposition'
elif parts[i].lower()=='tz':
chan = 'Zposition'
elif parts[i].lower()=='l':
chan = 'length'
self.vertices[0].meta['channels'].append(chan)
# order is reversed compared to bvh
self.vertices[0].meta['order'] = order[::-1]
elif parts[0]=='axis':
# order is reversed compared to bvh
self.vertices[0].meta['axis_order'] = parts[1][::-1].lower()
elif parts[0]=='position':
self.vertices[0].meta['offset'] = [float(parts[1]),
float(parts[2]),
float(parts[3])]
elif parts[0]=='orientation':
self.vertices[0].meta['orientation'] = [float(parts[1]),
float(parts[2]),
float(parts[3])]
lin = self.read_line(fid)
return lin
def read_skel(self, fid):
"""Loads an acclaim skeleton format from a file stream."""
lin = self.read_line(fid)
while lin:
if lin[0]==':':
if lin[1:]== 'name':
lin = self.read_line(fid)
self.name = lin
elif lin[1:]=='units':
lin = self.read_units(fid)
elif lin[1:]=='documentation':
lin = self.read_documentation(fid)
elif lin[1:]=='root':
lin = self.read_root(fid)
elif lin[1:]=='bonedata':
lin = self.read_bonedata(fid)
elif lin[1:]=='hierarchy':
lin = self.read_hierarchy(fid)
elif lin[1:8]=='version':
lin = self.read_line(fid)
continue
else:
if not lin:
self.finalize()
return
lin = self.read_line(fid)
else:
raise ValueError('Unrecognised file format')
self.finalize()
def read_units(self, fid):
"""Read units from an acclaim skeleton file stream."""
lin = self.read_line(fid)
while lin[0] != ':':
parts = lin.split()
if parts[0]=='mass':
self.mass = float(parts[1])
elif parts[0]=='length':
self.length = float(parts[1])
elif parts[0]=='angle':
self.angle = parts[1]
lin = self.read_line(fid)
return lin
def resolve_indices(self, index, start_val):
"""Get indices for the skeleton from the channels when loading in channel data."""
channels = self.vertices[index].meta['channels']
base_channel = start_val
rot_ind = -np.ones(3, dtype=int)
pos_ind = -np.ones(3, dtype=int)
for i in range(len(channels)):
if channels[i]== 'Xrotation':
rot_ind[0] = base_channel + i
elif channels[i]=='Yrotation':
rot_ind[1] = base_channel + i
elif channels[i]=='Zrotation':
rot_ind[2] = base_channel + i
elif channels[i]=='Xposition':
pos_ind[0] = base_channel + i
elif channels[i]=='Yposition':
pos_ind[1] = base_channel + i
elif channels[i]=='Zposition':
pos_ind[2] = base_channel + i
self.vertices[index].meta['rot_ind'] = list(rot_ind)
self.vertices[index].meta['pos_ind'] = list(pos_ind)
def set_rotation_matrices(self):
"""Set the meta information at each vertex to contain the correct matrices C and Cinv as prescribed by the rotations and rotation orders."""
for i in range(len(self.vertices)):
self.vertices[i].meta['C'] = rotation_matrix(self.vertices[i].meta['axis'][0],
self.vertices[i].meta['axis'][1],
self.vertices[i].meta['axis'][2],
self.vertices[i].meta['axis_order'],
degrees=True)
# Todo: invert this by applying angle operations in reverse order
self.vertices[i].meta['Cinv'] = np.linalg.inv(self.vertices[i].meta['C'])
# Utilities for loading in x,y,z data.
def load_text_data(dataset, directory, centre=True):
"""Load in a data set of marker points from the Ohio State University C3D motion capture files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
points, point_names = parse_text(os.path.join(directory, dataset + '.txt'))[0:2]
# Remove markers where there is a NaN in any of the x, y or z coordinates
present_index = [i for i in range(points[0].shape[1]) if not (np.any(np.isnan(points[0][:, i])) or np.any(np.isnan(points[1][:, i])) or np.any(np.isnan(points[2][:, i])))]
point_names = point_names[present_index]
for i in range(3):
points[i] = points[i][:, present_index]
if centre:
points[i] = (points[i].T - points[i].mean(axis=1)).T
# Concatenate the X, Y and Z markers together
Y = np.concatenate((points[0], points[1], points[2]), axis=1)
Y = Y/400.
connect = read_connections(os.path.join(directory, 'connections.txt'), point_names)
return Y, connect
def parse_text(file_name):
"""Parse data from Ohio State University text mocap files (http://accad.osu.edu/research/mocap/mocap_data.htm)."""
# Read the header
fid = open(file_name, 'r')
point_names = np.array(fid.readline().split())[2:-1:3]
fid.close()
for i in range(len(point_names)):
point_names[i] = point_names[i][0:-2]
# Read the matrix data
S = np.loadtxt(file_name, skiprows=1)
field = np.uint(S[:, 0])
times = S[:, 1]
S = S[:, 2:]
# Set the -9999.99 markers to be not present
S[S==-9999.99] = np.NaN
# Store x, y and z in different arrays
points = []
points.append(S[:, 0:-1:3])
points.append(S[:, 1:-1:3])
points.append(S[:, 2:-1:3])
return points, point_names, times
def read_connections(file_name, point_names):
"""Read a file detailing which markers should be connected to which for motion capture data."""
connections = []
fid = open(file_name, 'r')
line=fid.readline()
while(line):
connections.append(np.array(line.split(',')))
connections[-1][0] = connections[-1][0].strip()
connections[-1][1] = connections[-1][1].strip()
line = fid.readline()
connect = np.zeros((len(point_names), len(point_names)),dtype=bool)
for i in range(len(point_names)):
for j in range(len(point_names)):
for k in range(len(connections)):
if connections[k][0] == point_names[i] and connections[k][1] == point_names[j]:
connect[i,j]=True
connect[j,i]=True
break
return connect
skel = acclaim_skeleton()
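# --- Illustrative usage (not part of GPy). Hypothetical file names: an ASF
# skeleton plus a matching AMC channel file yield one (x, y, z) row per
# vertex for a given frame of channels.
#
#   skel = acclaim_skeleton('subject.asf')
#   channels = skel.load_channels('trial.amc')
#   xyz = skel.to_xyz(channels[0, :])  # first frame -> (num_vertices, 3)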
| bsd-3-clause |
Zord13appdesa/python-for-android | python-build/python-libs/gdata/build/lib/gdata/health/__init__.py | 263 | 7090 | #!/usr/bin/python
#
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Health."""
__author__ = 'api.eric@google.com (Eric Bidelman)'
import atom
import gdata
CCR_NAMESPACE = 'urn:astm-org:CCR'
METADATA_NAMESPACE = 'http://schemas.google.com/health/metadata'
class Ccr(atom.AtomBase):
"""Represents a Google Health <ContinuityOfCareRecord>."""
_tag = 'ContinuityOfCareRecord'
_namespace = CCR_NAMESPACE
_children = atom.AtomBase._children.copy()
def __init__(self, extension_elements=None,
extension_attributes=None, text=None):
atom.AtomBase.__init__(self, extension_elements=extension_elements,
extension_attributes=extension_attributes, text=text)
def GetAlerts(self):
"""Helper for extracting Alert/Allergy data from the CCR.
Returns:
A list of ExtensionElements (one for each allergy found) or None if
no allergies were found in this CCR.
"""
try:
body = self.FindExtensions('Body')[0]
return body.FindChildren('Alerts')[0].FindChildren('Alert')
except:
return None
def GetAllergies(self):
"""Alias for GetAlerts()."""
return self.GetAlerts()
def GetProblems(self):
"""Helper for extracting Problem/Condition data from the CCR.
Returns:
A list of ExtensionElements (one for each problem found) or None if
no problems were found in this CCR.
"""
try:
body = self.FindExtensions('Body')[0]
return body.FindChildren('Problems')[0].FindChildren('Problem')
except:
return None
def GetConditions(self):
"""Alias for GetProblems()."""
return self.GetProblems()
def GetProcedures(self):
"""Helper for extracting Procedure data from the CCR.
Returns:
A list of ExtensionElements (one for each procedure found) or None if
no procedures were found in this CCR.
"""
try:
body = self.FindExtensions('Body')[0]
return body.FindChildren('Procedures')[0].FindChildren('Procedure')
except:
return None
def GetImmunizations(self):
"""Helper for extracting Immunization data from the CCR.
Returns:
A list of ExtensionElements (one for each immunization found) or None if
no immunizations were found in this CCR.
"""
try:
body = self.FindExtensions('Body')[0]
return body.FindChildren('Immunizations')[0].FindChildren('Immunization')
except:
return None
def GetMedications(self):
"""Helper for extracting Medication data from the CCR.
Returns:
A list of ExtensionElements (one for each medication found) or None if
no medications were found in this CCR.
"""
try:
body = self.FindExtensions('Body')[0]
return body.FindChildren('Medications')[0].FindChildren('Medication')
except:
return None
def GetResults(self):
"""Helper for extracting Results/Labresults data from the CCR.
Returns:
A list of ExtensionElements (one for each result found) or None if
no results were found in this CCR.
"""
try:
body = self.FindExtensions('Body')[0]
return body.FindChildren('Results')[0].FindChildren('Result')
except:
return None
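# --- Illustrative usage (not part of gdata). A hedged sketch: given a
# ProfileEntry parsed from a Health feed (xml_string is a placeholder),
# pull medication elements out of its embedded CCR document.
#
#   entry = ProfileEntryFromString(xml_string)
#   meds = entry.ccr.GetMedications()
#   if meds:
#       for med in meds:
#           print med.tag  # one ExtensionElement per <Medication>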
class ProfileEntry(gdata.GDataEntry):
"""The Google Health version of an Atom Entry."""
_tag = gdata.GDataEntry._tag
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}ContinuityOfCareRecord' % CCR_NAMESPACE] = ('ccr', Ccr)
def __init__(self, ccr=None, author=None, category=None, content=None,
atom_id=None, link=None, published=None, title=None,
updated=None, text=None, extension_elements=None,
extension_attributes=None):
self.ccr = ccr
gdata.GDataEntry.__init__(
self, author=author, category=category, content=content,
atom_id=atom_id, link=link, published=published, title=title,
updated=updated, extension_elements=extension_elements,
extension_attributes=extension_attributes, text=text)
class ProfileFeed(gdata.GDataFeed):
"""A feed containing a list of Google Health profile entries."""
_tag = gdata.GDataFeed._tag
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileEntry])
class ProfileListEntry(gdata.GDataEntry):
"""The Atom Entry in the Google Health profile list feed."""
_tag = gdata.GDataEntry._tag
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def GetProfileId(self):
return self.content.text
def GetProfileName(self):
return self.title.text
class ProfileListFeed(gdata.GDataFeed):
"""A feed containing a list of Google Health profile list entries."""
_tag = gdata.GDataFeed._tag
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [ProfileListEntry])
def ProfileEntryFromString(xml_string):
"""Converts an XML string into a ProfileEntry object.
Args:
xml_string: string The XML describing a Health profile feed entry.
Returns:
A ProfileEntry object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfileEntry, xml_string)
def ProfileListEntryFromString(xml_string):
"""Converts an XML string into a ProfileListEntry object.
Args:
xml_string: string The XML describing a Health profile list feed entry.
Returns:
A ProfileListEntry object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfileListEntry, xml_string)
def ProfileFeedFromString(xml_string):
"""Converts an XML string into a ProfileFeed object.
Args:
xml_string: string The XML describing a ProfileFeed feed.
Returns:
A ProfileFeed object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfileFeed, xml_string)
def ProfileListFeedFromString(xml_string):
"""Converts an XML string into a ProfileListFeed object.
Args:
xml_string: string The XML describing a ProfileListFeed feed.
Returns:
A ProfileListFeed object corresponding to the given XML.
"""
return atom.CreateClassFromXMLString(ProfileListFeed, xml_string)
| apache-2.0 |
vrsys/avangong | avango-menu/python/avango/menu/widget/__init__.py | 6 | 1776 | # -*- Mode:Python -*-
##########################################################################
# #
# This file is part of AVANGO. #
# #
# Copyright 1997 - 2008 Fraunhofer-Gesellschaft zur Foerderung der #
# angewandten Forschung (FhG), Munich, Germany. #
# #
# AVANGO is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Lesser General Public License as #
# published by the Free Software Foundation, version 3. #
# #
# AVANGO is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU Lesser General Public #
# License along with AVANGO. If not, see <http://www.gnu.org/licenses/>. #
# #
##########################################################################
from _WidgetBase import WidgetBase
from _Container import Container
from _Divider import Divider
from _PushButton import PushButton
from _CheckBox import CheckBox
from _RadioButton import RadioButton
from _Slider import Slider
from _Image import Image
| lgpl-3.0 |
LinguList/server | app/lpserver/util.py | 2 | 2564 | # author : Johann-Mattis List
# email : mattis.list@uni-marburg.de
# created : 2014-12-05 22:36
# modified : 2014-12-05 22:36
"""
Utility functions for LingPy Server.
"""
__author__="Johann-Mattis List"
__date__="2014-12-05"
from glob import glob
import io
import unicodedata
import os
from pathlib import Path
from six import text_type
def lines_to_text(lines):
return ''.join(line if line.endswith('\n') else line + '\n' for line in lines)
# function forked from @xrotwang's addons to LingPy
def _str_path(path, mkdir=False):
"""Get a file-system path as text_type, suitable for passing into io.open.
:param path: A fs path either as Path instance or as text_type.
:param mkdir: If True, create the directories within the path.
:return: The path as text_type.
"""
res = text_type(path) if isinstance(path, Path) else path
if mkdir:
dirname = os.path.dirname(res)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
return res
def normalize_path(path):
"""Normalize a path for different platforms."""
return os.path.join(*path.split('/'))
def write_text_file(path, content, normalize=None):
"""Write a text file encoded in utf-8.
:param path: File-system path of the file.
:param content: The text content to be written.
:param normalize: If not `None` a valid unicode normalization mode must be passed.
"""
# modify path to be usable on Windows
path = normalize_path(path)
if not isinstance(content, text_type):
content = lines_to_text(content)
with io.open(_str_path(path, mkdir=True), 'w', encoding='utf8') as fp:
fp.write(unicodedata.normalize(normalize, content) if normalize else content)
def read_text_file(path, normalize=None, lines=False):
"""Read a text file encoded in utf-8.
:param path: File-system path of the file.
:param normalize: If not `None` a valid unicode normalization mode must be passed.
:param lines: Flag signalling whether to return a list of lines.
:return: File content as unicode object or list of lines as unicode objects.
.. note:: The whole file is read into memory.
"""
# modify path to be usable on Windows
path = normalize_path(path)
def _normalize(chunk):
return unicodedata.normalize(normalize, chunk) if normalize else chunk
with io.open(_str_path(path), 'r', encoding='utf8') as fp:
if lines:
return [_normalize(line) for line in fp]
else:
return _normalize(fp.read())
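# --- Illustrative usage (not part of the module). A round-trip sketch with
# a hypothetical path, normalizing to NFC on both write and read:
#
#   write_text_file('data/poem.txt', u'ingw\u00e9\n', normalize='NFC')
#   read_text_file('data/poem.txt', normalize='NFC', lines=True)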
| gpl-2.0 |
sv-dev1/odoo | addons/website/models/test_models.py | 335 | 1386 | # -*- coding: utf-8 -*-
from openerp.osv import orm, fields
class test_converter(orm.Model):
_name = 'website.converter.test'
# disable translation export for those brilliant field labels and values
_translate = False
_columns = {
'char': fields.char(),
'integer': fields.integer(),
'float': fields.float(),
'numeric': fields.float(digits=(16, 2)),
'many2one': fields.many2one('website.converter.test.sub'),
'binary': fields.binary(),
'date': fields.date(),
'datetime': fields.datetime(),
'selection': fields.selection([
(1, "réponse A"),
(2, "réponse B"),
(3, "réponse C"),
(4, "réponse D"),
]),
'selection_str': fields.selection([
('A', "Qu'il n'est pas arrivé à Toronto"),
('B', "Qu'il était supposé arriver à Toronto"),
('C', "Qu'est-ce qu'il fout ce maudit pancake, tabernacle ?"),
('D', "La réponse D"),
], string=u"Lorsqu'un pancake prend l'avion à destination de Toronto et "
u"qu'il fait une escale technique à St Claude, on dit:"),
'html': fields.html(),
'text': fields.text(),
}
class test_converter_sub(orm.Model):
_name = 'website.converter.test.sub'
_columns = {
'name': fields.char(),
}
| agpl-3.0 |
echodaemon/LVDOWin | GUI.py | 2 | 11065 | # -*- coding: utf-8 -*-
"""
This is a sample GUI for LVDOWin written by Aaron Gokaslan.
The GUI just feeds a very simple set of parameters to the lvdoenc/lvdodec pipelines.
"""
from PySide import QtGui
from PySide.QtCore import Qt, QRegExp
from subprocess import Popen, PIPE, STDOUT
import os
########################################################################
class OsirisGUI(QtGui.QWidget):
""""""
#----------------------------------------------------------------------
def __init__(self):
"""Constructor"""
# super(DialogDemo, self).__init__()
QtGui.QWidget.__init__(self)
self.label = QtGui.QLabel("Welcome to LVDOWin!")
self.label.setAlignment(Qt.AlignCenter| Qt.AlignCenter)
self.te = QtGui.QTextEdit()
self.te.setEnabled(False)
# create the buttons
encodeFileBtn = QtGui.QPushButton("Convert file to video")
decodeFileBtn = QtGui.QPushButton("Convert a video to a file")
# connect the buttons to the functions (signals to slots)
encodeFileBtn.clicked.connect(self.encodeFile)
decodeFileBtn.clicked.connect(self.decodeFile)
encodeFileBtn.setToolTip("Saves the file as output.mkv")
decodeFileBtn.setToolTip("Decodes the file at the specified")
buttonLayout = QtGui.QHBoxLayout()
buttonLayout.addWidget(encodeFileBtn)
buttonLayout.addWidget(decodeFileBtn)
io_box = self.createIOBox()
settings_box = self.createSettings()
layout = QtGui.QVBoxLayout()
layout.addWidget(self.label)
layout.addWidget(io_box)
#layout.addWidget(settings_box)
layout.addWidget(self.te)
layout.addLayout(buttonLayout)
self.setLayout(layout)
self.adjustSize()
self.move(QtGui.QDesktopWidget().availableGeometry().center()
- self.frameGeometry().center())
self.setWindowTitle("LVDOWin - GUI")
# ...
def createIOBox(self):
source_component = QtGui.QVBoxLayout()
sourceLabel = QtGui.QLabel("Input File:")
sourceLabel.setToolTip("Specifies what file you want to convert")
sourceLabel.setAlignment(Qt.AlignCenter| Qt.AlignCenter)
sourceLayout = QtGui.QHBoxLayout()
openBtn = QtGui.QPushButton("...")
width = openBtn.fontMetrics().boundingRect(openBtn.text()).width() + 14
openBtn.setMaximumWidth(width)
self.sourcePath = QtGui.QLineEdit(".." + os.path.sep + "message.txt")
self.sourcePath.setEnabled(False)
openBtn.clicked.connect(self.setSourcePath)
sourceLayout.addWidget(sourceLabel)
sourceLayout.addWidget(self.sourcePath)
sourceLayout.addWidget(openBtn)
#source_component.addWidget(sourceLabel)
source_component.addLayout(sourceLayout)
#source_box = self.BorderBox(source_component)
destination_component = QtGui.QVBoxLayout()
destinationLabel = QtGui.QLabel("Output File:")
destinationLabel.setToolTip("Specifies where you want to save the file")
destinationLabel.setAlignment(Qt.AlignCenter| Qt.AlignCenter)
destination_layout = QtGui.QHBoxLayout()
#TODO Add a wrapper the preserves the original filename
saveBtn = QtGui.QPushButton('...')
width = saveBtn.fontMetrics().boundingRect(saveBtn.text()).width() + 14
saveBtn.setMaximumWidth(width)
self.destinationPath = QtGui.QLineEdit(".." + os.path.sep + "output.mkv")
self.destinationPath.setEnabled(False)
saveBtn.clicked.connect(self.setDestinationPath)
destination_layout.addWidget(destinationLabel)
destination_layout.addWidget(self.destinationPath)
destination_layout.addWidget(saveBtn)
#destination_component.addWidget(destinationLabel)
destination_component.addLayout(destination_layout)
#destination_box = self.BorderBox(destination_component)
io_component = QtGui.QVBoxLayout()
io_component.addLayout(source_component)
io_component.addLayout(destination_component)
io_box = self.BorderBox(io_component)
return io_box
def createSettings(self):
screen_res_box = QtGui.QHBoxLayout()
screen_res_box.addWidget(QtGui.QLabel('Video resolution'))
self.screenResolution = QtGui.QLineEdit('640x480')
self.screenResolution.setValidator(
QtGui.QRegExpValidator(QRegExp('[0-9]{3,}x[0-9]{3,}'),
self.screenResolution))
self.screenResolution.textChanged.connect(self.check_state)
self.screenResolution.textChanged.emit(self.screenResolution.text())
self.screenResolution.setEnabled(False)
screen_res_box.addWidget(self.screenResolution)
return self.BorderBox(screen_res_box)
def setSourcePath(self):
path = self.exec_file_open_dialog()
if path is None:
return
self.sourcePath.setText(str(path))
def setDestinationPath(self):
path = self.exec_file_save_dialog()
if path is None:
return
self.destinationPath.setText(str(path))
def BorderBox(self, layout):
from PySide.QtGui import QFrame
toto = QFrame()
toto.setLayout(layout)
toto.setFrameShape(QFrame.Box)
toto.setFrameShadow(QFrame.Sunken)
return toto
def popupMessage(self, message):
from PySide.QtGui import QMessageBox
msgBox = QMessageBox()
msgBox.setWindowTitle('Warning!!!')
msgBox.setText(message)
msgBox.exec_()
def HLine(self):
from PySide.QtGui import QFrame
toto = QFrame()
toto.setFrameShape(QFrame.HLine)
toto.setFrameShadow(QFrame.Sunken)
return toto
def normalOutputWritten(self, text):
"""Append text to the QTextEdit."""
# Maybe QTextEdit.append() works as well, but this is how I do it:
cursor = self.te.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
cursor.insertText(text)
self.te.setTextCursor(cursor)
self.te.ensureCursorVisible()
#----------------------------------------------------------------------
def encodeFile(self):
"""
Opens a file dialog and encodes the file as output.mkv
"""
path = self.sourcePath.text()
#This is necessary since we are using the shell.
if path is None:
return
destination_text = self.destinationPath.text()
input_res = '640x480'
#TODO Reimplement with shell=False once an alternative for the `type` command is found
cmd = 'type "%s"' % path
cmd += ' | lvdoenc -s ' + input_res + ' -q 6 --qmin 1 --qmax 4 | '
cmd += 'x264 --input-res ' + input_res + ' --fps 1 --profile high --level 5.1 --tune stillimage '
cmd += '--crf 22 --colormatrix bt709 --me dia '
cmd += '--merange 0 -o "%s" -' % destination_text
bin_directory = os.getcwd() + os.path.sep +'bin'
p = Popen(cmd, stdout = PIPE,
stderr = STDOUT, shell = True, cwd=bin_directory,
bufsize = 0, universal_newlines=True)
while True:
line = p.stdout.readline()
print(line.rstrip())
self.normalOutputWritten(str(line))
self.te.repaint()
if not line: break
def decodeFile(self):
"""
Opens a file dialog and decodes the file as message.txt
"""
path = self.sourcePath.text()
#This is necessary since we are using the shell.
if path is None:
return
if not isVideo(path):
self.popupMessage('The input file does not seem to be a video file')
return
#TODO Reimplement with shell=False once an alternative for the `type` command is found
input_res = '640x480'
cmd = 'ffmpeg -i "%s"' % path
cmd += ' -r 1 -f rawvideo - | lvdodec -s ' + input_res + ' -q 6 --qmin 1 --qmax 4 > '
#cmd = cmd.split()
cmd = cmd + '"' + self.destinationPath.text() + '"'
bin_directory = os.getcwd() + os.path.sep +'bin'
p = Popen(cmd, stdout = PIPE,
stderr = STDOUT, shell = True, cwd=bin_directory,
bufsize = 0, universal_newlines=True)
while True:
line = p.stdout.readline()
# print(line.rstrip())
self.normalOutputWritten(str(line))
self.te.repaint()
if not line: break
def exec_file_open_dialog(self):
path, _ = QtGui.QFileDialog.getOpenFileName(self, "Open File", os.getcwd())
if path:
path = path.replace("/", os.path.sep)
return path
else:
return None
def exec_file_save_dialog(self):
path, _ = QtGui.QFileDialog.getSaveFileName(self, "Save file", "")
print(path)
if path:
path = path.replace("/", os.path.sep)
return path
else:
return None
#----------------------------------------------------------------------
def openDirectoryDialog(self):
"""
Opens a dialog to allow user to choose a directory
"""
flags = QtGui.QFileDialog.DontResolveSymlinks | QtGui.QFileDialog.ShowDirsOnly
d = directory = QtGui.QFileDialog.getExistingDirectory(self,
"Open Directory",
os.getcwd(),
flags)
self.label.setText(d)
def check_state(self, *args, **kwargs):
sender = self.sender()
validator = sender.validator()
state = validator.validate(sender.text(), 0)[0]
if state == QtGui.QValidator.Acceptable:
color = '#c4df9b' # green
elif state == QtGui.QValidator.Intermediate:
color = '#fff79a' # yellow
else:
color = '#f6989d' # red
sender.setStyleSheet('QLineEdit { background-color: %s }' % color)
def isVideo(filename):
filename, file_extension = os.path.splitext(filename)
valid_filetypes = ['.mp4', '.flv', '.avi', '.wmv', '.mkv']
return file_extension in valid_filetypes
#----------------------------------------------------------------------
if __name__ == "__main__":
app = QtGui.QApplication([])
form = OsirisGUI()
form.show()
app.exec_()
| gpl-3.0 |
mosbasik/buzhug | javasrc/lib/Jython/Lib/test/test_float_jy.py | 9 | 4186 | """Float tests
Made for Jython.
"""
import math
import sys
import unittest
from test import test_support
jython = test_support.is_jython
class FloatTestCase(unittest.TestCase):
def test_float_repr(self):
self.assertEqual(repr(12345678.000000005), '12345678.000000006')
self.assertEqual(repr(12345678.0000000005), '12345678.0')
self.assertEqual(repr(math.pi**-100),
jython and '1.9275814160560203e-50' or '1.9275814160560206e-50')
self.assertEqual(repr(-1.0), '-1.0')
self.assertEqual(repr(-9876.543210),
jython and '-9876.54321' or '-9876.5432099999998')
self.assertEqual(repr(0.123456789e+35), '1.23456789e+34')
def test_float_str(self):
self.assertEqual(str(12345678.000005), '12345678.0')
self.assertEqual(str(12345678.00005),
jython and '12345678.0' or '12345678.0001')
self.assertEqual(str(12345678.00005),
jython and '12345678.0' or '12345678.0001')
self.assertEqual(str(12345678.0005), '12345678.0005')
self.assertEqual(str(math.pi**-100), '1.92758141606e-50')
self.assertEqual(str(0.0), '0.0')
self.assertEqual(str(-1.0), '-1.0')
self.assertEqual(str(-9876.543210), '-9876.54321')
self.assertEqual(str(23456789012E666), 'inf')
self.assertEqual(str(-23456789012E666), '-inf')
def test_float_str_formatting(self):
self.assertEqual('%.13g' % 12345678.00005, '12345678.00005')
self.assertEqual('%.12g' % 12345678.00005,
jython and '12345678' or '12345678.0001')
self.assertEqual('%.11g' % 12345678.00005, '12345678')
# XXX: The exponential formatter isn't totally correct, e.g. our
# output here is really .13g
self.assertEqual('%.12g' % math.pi**-100, '1.92758141606e-50')
self.assertEqual('%.5g' % 123.005, '123')
self.assertEqual('%#.5g' % 123.005, '123.00')
self.assertEqual('%#g' % 0.001, '0.00100000')
self.assertEqual('%#.5g' % 0.001, '0.0010000')
self.assertEqual('%#.1g' % 0.0001, '0.0001')
self.assertEqual('%#.4g' % 100, '100.0')
self.assertEqual('%#.4g' % 100.25, '100.2')
self.assertEqual('%g' % 0.00001, '1e-05')
self.assertEqual('%#g' % 0.00001, '1.00000e-05')
self.assertEqual('%e' % -400.0, '-4.000000e+02')
self.assertEqual('%.2g' % 99, '99')
self.assertEqual('%.2g' % 100, '1e+02')
def test_overflow(self):
shuge = '12345' * 120
shuge_float = float(shuge)
shuge_int = int(shuge)
self.assertRaises(OverflowError, float, shuge_int)
self.assertRaises(OverflowError, int, shuge_float)
# and cmp should not overflow
self.assertNotEqual(0.1, shuge_int)
def test_nan(self):
nan = float('nan')
self.assertEqual(type(nan), float)
if jython:
# support Java syntax
self.assertEqual(type(float('NaN')), float)
# CPython 2.4/2.5 allow this
self.assertEqual(long(nan), 0)
self.assertNotEqual(nan, float('nan'))
self.assertNotEqual(nan, nan)
self.assertEqual(cmp(nan, float('nan')), 1)
self.assertEqual(cmp(nan, nan), 0)
for i in (-1, 1, -1.0, 1.0):
self.assertEqual(cmp(nan, i), -1)
self.assertEqual(cmp(i, nan), 1)
def test_infinity(self):
self.assertEqual(type(float('Infinity')), float)
self.assertEqual(type(float('inf')), float)
self.assertRaises(OverflowError, long, float('Infinity'))
def test_float_none(self):
self.assertRaises(TypeError, float, None)
def test_pow(self):
class Foo(object):
def __rpow__(self, other):
return other ** 2
# regression in 2.5 alphas
self.assertEqual(4.0 ** Foo(), 16.0)
def test_faux(self):
class F(object):
def __float__(self):
return 1.6
self.assertEqual(math.cos(1.6), math.cos(F()))
def test_main():
test_support.run_unittest(FloatTestCase)
if __name__ == '__main__':
test_main()
| bsd-3-clause |
giggsey/SickRage | lib/hachoir_parser/guess.py | 56 | 4384 | """
Parser list management:
- createParser() finds the best parser for a file.
"""
import os
from hachoir_core.error import warning, info, HACHOIR_ERRORS
from hachoir_parser import ValidateError, HachoirParserList
from hachoir_core.stream import FileInputStream
from hachoir_core.i18n import _
import weakref
class QueryParser(object):
fallback = None
other = None
def __init__(self, tags):
self.validate = True
self.use_fallback = False
self.parser_args = None
self.db = HachoirParserList.getInstance()
self.parsers = set(self.db)
parsers = []
for tag in tags:
if not self.parsers:
break
parsers += self._getByTag(tag)
if self.fallback is None:
self.fallback = len(parsers) == 1
if self.parsers:
other = len(parsers)
parsers += list(self.parsers)
self.other = parsers[other]
self.parsers = parsers
def __iter__(self):
return iter(self.parsers)
def translate(self, name, value):
if name == "filename":
filename = os.path.basename(value).split(".")
if len(filename) <= 1:
value = ""
else:
value = filename[-1].lower()
name = "file_ext"
return name, value
def _getByTag(self, tag):
if tag is None:
self.parsers.clear()
return []
elif callable(tag):
parsers = [ parser for parser in self.parsers if tag(parser) ]
for parser in parsers:
self.parsers.remove(parser)
elif tag[0] == "class":
self.validate = False
return [ tag[1] ]
elif tag[0] == "args":
self.parser_args = tag[1]
return []
else:
tag = self.translate(*tag)
parsers = []
if tag is not None:
key = tag[0]
byname = self.db.bytag.get(key,{})
if tag[1] is None:
values = byname.itervalues()
else:
values = byname.get(tag[1],()),
if key == "id" and values:
self.validate = False
for value in values:
for parser in value:
if parser in self.parsers:
parsers.append(parser)
self.parsers.remove(parser)
return parsers
def parse(self, stream, fallback=True):
if hasattr(stream, "_cached_parser"):
parser = stream._cached_parser()
else:
parser = None
if parser is not None:
if parser.__class__ in self.parsers:
return parser
parser = self.doparse(stream, fallback)
if parser is not None:
stream._cached_parser = weakref.ref(parser)
return parser
def doparse(self, stream, fallback=True):
fb = None
warn = warning
for parser in self.parsers:
try:
parser_obj = parser(stream, validate=self.validate)
if self.parser_args:
for key, value in self.parser_args.iteritems():
setattr(parser_obj, key, value)
return parser_obj
except ValidateError, err:
res = unicode(err)
if fallback and self.fallback:
fb = parser
except HACHOIR_ERRORS, err:
res = unicode(err)
if warn:
if parser == self.other:
warn = info
warn(_("Skip parser '%s': %s") % (parser.__name__, res))
fallback = False
if self.use_fallback and fb:
warning(_("Force use of parser '%s'") % fb.__name__)
return fb(stream)
def guessParser(stream):
return QueryParser(stream.tags).parse(stream)
def createParser(filename, real_filename=None, tags=None):
"""
    Create a parser from a file, or return None on error.
    Options:
    - filename (unicode): Input file name ;
    - real_filename (str|unicode): Real file name ;
    - tags (list|None): Optional list of parser tags used to guide the guess.
"""
if not tags:
tags = []
stream = FileInputStream(filename, real_filename, tags=tags)
return guessParser(stream)
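# A minimal usage sketch (added for illustration; the helper below and the
# file name "example.bin" are hypothetical, not part of the original module):
def _example_create_parser(filename=u"example.bin"):
    """Guess a parser for `filename`, warning and returning None on failure."""
    try:
        parser = createParser(filename)
    except HACHOIR_ERRORS, err:
        warning(u"Unable to parse %s: %s" % (filename, unicode(err)))
        return None
    if parser is None:
        warning(u"No parser found for %s" % filename)
    return parser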
| gpl-3.0 |
gocardless/gocardless-pro-python | tests/integration/payer_authorisations_integration_test.py | 1 | 26990 | # WARNING: Do not edit by hand, this file was generated by Crank:
#
# https://github.com/gocardless/crank
#
import json
import requests
import responses
from nose.tools import (
assert_equal,
assert_is_instance,
assert_is_none,
assert_is_not_none,
assert_not_equal,
assert_raises
)
from gocardless_pro.errors import MalformedResponseError
from gocardless_pro import resources
from gocardless_pro import list_response
from .. import helpers
@responses.activate
def test_payer_authorisations_get():
fixture = helpers.load_fixture('payer_authorisations')['get']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.get(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
@responses.activate
def test_timeout_payer_authorisations_get_retries():
fixture = helpers.load_fixture('payer_authorisations')['get']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.get(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
def test_502_payer_authorisations_get_retries():
fixture = helpers.load_fixture('payer_authorisations')['get']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.get(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_payer_authorisations_create():
fixture = helpers.load_fixture('payer_authorisations')['create']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.create(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
@responses.activate
def test_payer_authorisations_create_new_idempotency_key_for_each_call():
fixture = helpers.load_fixture('payer_authorisations')['create']
helpers.stub_response(fixture)
helpers.client.payer_authorisations.create(*fixture['url_params'])
helpers.client.payer_authorisations.create(*fixture['url_params'])
assert_not_equal(responses.calls[0].request.headers.get('Idempotency-Key'),
responses.calls[1].request.headers.get('Idempotency-Key'))
def test_timeout_payer_authorisations_create_idempotency_conflict():
create_fixture = helpers.load_fixture('payer_authorisations')['create']
get_fixture = helpers.load_fixture('payer_authorisations')['get']
with helpers.stub_timeout_then_idempotency_conflict(create_fixture, get_fixture) as rsps:
response = helpers.client.payer_authorisations.create(*create_fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_timeout_payer_authorisations_create_retries():
fixture = helpers.load_fixture('payer_authorisations')['create']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.create(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
def test_502_payer_authorisations_create_retries():
fixture = helpers.load_fixture('payer_authorisations')['create']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.create(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_payer_authorisations_update():
fixture = helpers.load_fixture('payer_authorisations')['update']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.update(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
@responses.activate
def test_timeout_payer_authorisations_update_retries():
fixture = helpers.load_fixture('payer_authorisations')['update']
with helpers.stub_timeout_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.update(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
def test_502_payer_authorisations_update_retries():
fixture = helpers.load_fixture('payer_authorisations')['update']
with helpers.stub_502_then_response(fixture) as rsps:
response = helpers.client.payer_authorisations.update(*fixture['url_params'])
assert_equal(2, len(rsps.calls))
assert_equal(rsps.calls[0].request.headers.get('Idempotency-Key'),
rsps.calls[1].request.headers.get('Idempotency-Key'))
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
@responses.activate
def test_payer_authorisations_submit():
fixture = helpers.load_fixture('payer_authorisations')['submit']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.submit(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
def test_timeout_payer_authorisations_submit_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['submit']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.payer_authorisations.submit(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_payer_authorisations_submit_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['submit']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.payer_authorisations.submit(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
@responses.activate
def test_payer_authorisations_confirm():
fixture = helpers.load_fixture('payer_authorisations')['confirm']
helpers.stub_response(fixture)
response = helpers.client.payer_authorisations.confirm(*fixture['url_params'])
body = fixture['body']['payer_authorisations']
assert_is_instance(response, resources.PayerAuthorisation)
assert_is_not_none(responses.calls[-1].request.headers.get('Idempotency-Key'))
assert_equal(response.created_at, body.get('created_at'))
assert_equal(response.id, body.get('id'))
assert_equal(response.incomplete_fields, body.get('incomplete_fields'))
assert_equal(response.status, body.get('status'))
assert_equal(response.bank_account.account_holder_name,
body.get('bank_account')['account_holder_name'])
assert_equal(response.bank_account.account_number,
body.get('bank_account')['account_number'])
assert_equal(response.bank_account.account_number_ending,
body.get('bank_account')['account_number_ending'])
assert_equal(response.bank_account.account_number_suffix,
body.get('bank_account')['account_number_suffix'])
assert_equal(response.bank_account.account_type,
body.get('bank_account')['account_type'])
assert_equal(response.bank_account.bank_code,
body.get('bank_account')['bank_code'])
assert_equal(response.bank_account.branch_code,
body.get('bank_account')['branch_code'])
assert_equal(response.bank_account.country_code,
body.get('bank_account')['country_code'])
assert_equal(response.bank_account.currency,
body.get('bank_account')['currency'])
assert_equal(response.bank_account.iban,
body.get('bank_account')['iban'])
assert_equal(response.bank_account.metadata,
body.get('bank_account')['metadata'])
assert_equal(response.customer.address_line1,
body.get('customer')['address_line1'])
assert_equal(response.customer.address_line2,
body.get('customer')['address_line2'])
assert_equal(response.customer.address_line3,
body.get('customer')['address_line3'])
assert_equal(response.customer.city,
body.get('customer')['city'])
assert_equal(response.customer.company_name,
body.get('customer')['company_name'])
assert_equal(response.customer.country_code,
body.get('customer')['country_code'])
assert_equal(response.customer.danish_identity_number,
body.get('customer')['danish_identity_number'])
assert_equal(response.customer.email,
body.get('customer')['email'])
assert_equal(response.customer.family_name,
body.get('customer')['family_name'])
assert_equal(response.customer.given_name,
body.get('customer')['given_name'])
assert_equal(response.customer.locale,
body.get('customer')['locale'])
assert_equal(response.customer.metadata,
body.get('customer')['metadata'])
assert_equal(response.customer.postal_code,
body.get('customer')['postal_code'])
assert_equal(response.customer.region,
body.get('customer')['region'])
assert_equal(response.customer.swedish_identity_number,
body.get('customer')['swedish_identity_number'])
assert_equal(response.links.bank_account,
body.get('links')['bank_account'])
assert_equal(response.links.customer,
body.get('links')['customer'])
assert_equal(response.links.mandate,
body.get('links')['mandate'])
assert_equal(response.mandate.metadata,
body.get('mandate')['metadata'])
assert_equal(response.mandate.payer_ip_address,
body.get('mandate')['payer_ip_address'])
assert_equal(response.mandate.reference,
body.get('mandate')['reference'])
assert_equal(response.mandate.scheme,
body.get('mandate')['scheme'])
def test_timeout_payer_authorisations_confirm_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['confirm']
with helpers.stub_timeout(fixture) as rsps:
with assert_raises(requests.ConnectTimeout):
response = helpers.client.payer_authorisations.confirm(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
def test_502_payer_authorisations_confirm_doesnt_retry():
fixture = helpers.load_fixture('payer_authorisations')['confirm']
with helpers.stub_502(fixture) as rsps:
with assert_raises(MalformedResponseError):
response = helpers.client.payer_authorisations.confirm(*fixture['url_params'])
assert_equal(1, len(rsps.calls))
| mit |
azulpanda/azulpanda_html | lib/werkzeug/exceptions.py | 316 | 17799 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
"""
Baseclass for all HTTP exceptions. This exception can be called as WSGI
application to render a default error page or you can catch the subclasses
of it independently and render nicer error messages.
"""
code = None
description = None
def __init__(self, description=None, response=None):
Exception.__init__(self)
if description is not None:
self.description = description
self.response = response
@classmethod
def wrap(cls, exception, name=None):
"""This method returns a new subclass of the exception provided that
also is a subclass of `BadRequest`.
"""
class newcls(cls, exception):
def __init__(self, arg=None, *args, **kwargs):
cls.__init__(self, *args, **kwargs)
exception.__init__(self, arg)
newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
newcls.__name__ = name or cls.__name__ + exception.__name__
return newcls
@property
def name(self):
"""The status name."""
return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')
def get_description(self, environ=None):
"""Get the description."""
return u'<p>%s</p>' % escape(self.description)
def get_body(self, environ=None):
"""Get the HTML body."""
return text_type((
u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
u'<title>%(code)s %(name)s</title>\n'
u'<h1>%(name)s</h1>\n'
u'%(description)s\n'
) % {
'code': self.code,
'name': escape(self.name),
'description': self.get_description(environ)
})
def get_headers(self, environ=None):
"""Get a list of headers."""
return [('Content-Type', 'text/html')]
def get_response(self, environ=None):
"""Get a response object. If one was passed to the exception
it's returned directly.
:param environ: the optional environ for the request. This
can be used to modify the response depending
on how the request looked like.
:return: a :class:`Response` object or a subclass thereof.
"""
if self.response is not None:
return self.response
if environ is not None:
environ = _get_environ(environ)
headers = self.get_headers(environ)
return Response(self.get_body(environ), self.code, headers)
def __call__(self, environ, start_response):
"""Call the exception as WSGI application.
:param environ: the WSGI environment.
:param start_response: the response callable provided by the WSGI
server.
"""
response = self.get_response(environ)
return response(environ, start_response)
def __str__(self):
return '%d: %s' % (self.code, self.name)
def __repr__(self):
return '<%s \'%s\'>' % (self.__class__.__name__, self)
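# A minimal sketch (added for illustration, not part of the original module):
# new error types are defined by subclassing HTTPException and setting
# ``code`` and ``description``::
#
#     class PaymentRequired(HTTPException):
#         code = 402
#         description = 'Payment is required to access this resource.'
#
#     raise PaymentRequired()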
class BadRequest(HTTPException):
"""*400* `Bad Request`
Raise if the browser sends something to the application the application
or server cannot handle.
"""
code = 400
description = (
'The browser (or proxy) sent a request that this server could '
'not understand.'
)
class ClientDisconnected(BadRequest):
"""Internal exception that is raised if Werkzeug detects a disconnected
client. Since the client is already gone at that point attempting to
send the error message to the client might not work and might ultimately
result in another exception in the server. Mainly this is here so that
it is silenced by default as far as Werkzeug is concerned.
Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extent, this might or might not be raised if a client
is gone.
.. versionadded:: 0.8
"""
class SecurityError(BadRequest):
"""Raised if something triggers a security error. This is otherwise
exactly like a bad request error.
.. versionadded:: 0.9
"""
class Unauthorized(HTTPException):
"""*401* `Unauthorized`
Raise if the user is not authorized. Also used if you want to use HTTP
basic auth.
"""
code = 401
description = (
'The server could not verify that you are authorized to access '
'the URL requested. You either supplied the wrong credentials (e.g. '
'a bad password), or your browser doesn\'t understand how to supply '
'the credentials required.'
)
class Forbidden(HTTPException):
"""*403* `Forbidden`
Raise if the user doesn't have the permission for the requested resource
but was authenticated.
"""
code = 403
description = (
'You don\'t have the permission to access the requested resource. '
'It is either read-protected or not readable by the server.'
)
class NotFound(HTTPException):
"""*404* `Not Found`
Raise if a resource does not exist and never existed.
"""
code = 404
description = (
'The requested URL was not found on the server. '
'If you entered the URL manually please check your spelling and '
'try again.'
)
class MethodNotAllowed(HTTPException):
"""*405* `Method Not Allowed`
Raise if the server used a method the resource does not handle. For
example `POST` if the resource is view only. Especially useful for REST.
The first argument for this exception should be a list of allowed methods.
Strictly speaking the response would be invalid if you don't provide valid
methods in the header which you can do with that list.
"""
code = 405
description = 'The method is not allowed for the requested URL.'
def __init__(self, valid_methods=None, description=None):
"""Takes an optional list of valid http methods
starting with werkzeug 0.3 the list will be mandatory."""
HTTPException.__init__(self, description)
self.valid_methods = valid_methods
def get_headers(self, environ):
headers = HTTPException.get_headers(self, environ)
if self.valid_methods:
headers.append(('Allow', ', '.join(self.valid_methods)))
return headers
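# Illustrative usage (added, not part of the original module): pass the
# allowed methods so the generated response carries a valid ``Allow``
# header::
#
#     raise MethodNotAllowed(valid_methods=['GET', 'HEAD'])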
class NotAcceptable(HTTPException):
"""*406* `Not Acceptable`
Raise if the server can't return any content conforming to the
`Accept` headers of the client.
"""
code = 406
description = (
'The resource identified by the request is only capable of '
'generating response entities which have content characteristics '
'not acceptable according to the accept headers sent in the '
'request.'
)
class RequestTimeout(HTTPException):
"""*408* `Request Timeout`
Raise to signalize a timeout.
"""
code = 408
description = (
'The server closed the network connection because the browser '
'didn\'t finish the request within the specified time.'
)
class Conflict(HTTPException):
"""*409* `Conflict`
Raise to signal that a request cannot be completed because it conflicts
with the current state on the server.
.. versionadded:: 0.7
"""
code = 409
description = (
'A conflict happened while processing the request. The resource '
'might have been modified while the request was being processed.'
)
class Gone(HTTPException):
"""*410* `Gone`
Raise if a resource existed previously and went away without new location.
"""
code = 410
description = (
'The requested URL is no longer available on this server and '
'there is no forwarding address.</p><p>If you followed a link '
'from a foreign page, please contact the author of this page.'
)
class LengthRequired(HTTPException):
"""*411* `Length Required`
Raise if the browser submitted data but no ``Content-Length`` header which
is required for the kind of processing the server does.
"""
code = 411
description = (
'A request with this method requires a valid <code>Content-'
'Length</code> header.'
)
class PreconditionFailed(HTTPException):
"""*412* `Precondition Failed`
Status code used in combination with ``If-Match``, ``If-None-Match``, or
``If-Unmodified-Since``.
"""
code = 412
description = (
'The precondition on the request for the URL failed positive '
'evaluation.'
)
class RequestEntityTooLarge(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = (
'The data value transmitted exceeds the capacity limit.'
)
class RequestURITooLarge(HTTPException):
"""*414* `Request URI Too Large`
Like *413* but for too long URLs.
"""
code = 414
description = (
'The length of the requested URL exceeds the capacity limit '
'for this server. The request cannot be processed.'
)
class UnsupportedMediaType(HTTPException):
"""*415* `Unsupported Media Type`
The status code returned if the server is unable to handle the media type
the client transmitted.
"""
code = 415
description = (
'The server does not support the media type transmitted in '
'the request.'
)
class RequestedRangeNotSatisfiable(HTTPException):
"""*416* `Requested Range Not Satisfiable`
The client asked for a part of the file that lies beyond the end
of the file.
.. versionadded:: 0.7
"""
code = 416
description = (
'The server cannot provide the requested range.'
)
class ExpectationFailed(HTTPException):
"""*417* `Expectation Failed`
The server cannot meet the requirements of the Expect request-header.
.. versionadded:: 0.7
"""
code = 417
description = (
'The server could not meet the requirements of the Expect header'
)
class ImATeapot(HTTPException):
"""*418* `I'm a teapot`
The server should return this if it is a teapot and someone attempted
to brew coffee with it.
.. versionadded:: 0.7
"""
code = 418
description = (
'This server is a teapot, not a coffee machine'
)
class UnprocessableEntity(HTTPException):
"""*422* `Unprocessable Entity`
Used if the request is well formed, but the instructions are otherwise
incorrect.
"""
code = 422
description = (
'The request was well-formed but was unable to be followed '
'due to semantic errors.'
)
class PreconditionRequired(HTTPException):
"""*428* `Precondition Required`
The server requires this request to be conditional, typically to prevent
the lost update problem, which is a race condition between two or more
clients attempting to update a resource through PUT or DELETE. By requiring
each client to include a conditional header ("If-Match" or "If-Unmodified-
Since") with the proper value retained from a recent GET request, the
server ensures that each client has at least seen the previous revision of
the resource.
"""
code = 428
description = (
'This request is required to be conditional; try using "If-Match" '
'or "If-Unmodified-Since".'
)
class TooManyRequests(HTTPException):
"""*429* `Too Many Requests`
The server is limiting the rate at which this user receives responses, and
this request exceeds that rate. (The server may use any convenient method
to identify users and their request rates). The server may include a
"Retry-After" header to indicate how long the user should wait before
retrying.
"""
code = 429
description = (
'This user has exceeded an allotted request count. Try again later.'
)
class RequestHeaderFieldsTooLarge(HTTPException):
"""*431* `Request Header Fields Too Large`
The server refuses to process the request because the header fields are too
large. One or more individual fields may be too large, or the set of all
headers is too large.
"""
code = 431
description = (
'One or more header fields exceeds the maximum size.'
)
class InternalServerError(HTTPException):
"""*500* `Internal Server Error`
Raise if an internal server error occurred. This is a good fallback if an
unknown error occurred in the dispatcher.
"""
code = 500
description = (
'The server encountered an internal error and was unable to '
'complete your request. Either the server is overloaded or there '
'is an error in the application.'
)
class NotImplemented(HTTPException):
"""*501* `Not Implemented`
Raise if the application does not support the action requested by the
browser.
"""
code = 501
description = (
'The server does not support the action requested by the '
'browser.'
)
class BadGateway(HTTPException):
"""*502* `Bad Gateway`
If you do proxying in your application you should return this status code
if you received an invalid response from the upstream server it accessed
in attempting to fulfill the request.
"""
code = 502
description = (
'The proxy server received an invalid response from an upstream '
'server.'
)
class ServiceUnavailable(HTTPException):
"""*503* `Service Unavailable`
Status code you should return if a service is temporarily unavailable.
"""
code = 503
description = (
'The server is temporarily unable to service your request due to '
'maintenance downtime or capacity problems. Please try again '
'later.'
)
default_exceptions = {}
__all__ = ['HTTPException']
def _find_exceptions():
for name, obj in iteritems(globals()):
try:
if getattr(obj, 'code', None) is not None:
default_exceptions[obj.code] = obj
__all__.append(obj.__name__)
except TypeError: # pragma: no cover
continue
_find_exceptions()
del _find_exceptions
class Aborter(object):
"""
When passed a dict of code -> exception items it can be used as
callable that raises exceptions. If the first argument to the
callable is an integer it will be looked up in the mapping, if it's
a WSGI application it will be raised in a proxy exception.
The rest of the arguments are forwarded to the exception constructor.
"""
def __init__(self, mapping=None, extra=None):
if mapping is None:
mapping = default_exceptions
self.mapping = dict(mapping)
if extra is not None:
self.mapping.update(extra)
def __call__(self, code, *args, **kwargs):
if not args and not kwargs and not isinstance(code, integer_types):
raise HTTPException(response=code)
if code not in self.mapping:
raise LookupError('no exception for %r' % code)
raise self.mapping[code](*args, **kwargs)
abort = Aborter()
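# A minimal usage sketch (added for illustration; ``example_view`` is a
# hypothetical function): an integer argument is looked up in the mapping,
# while a response object is re-raised wrapped in HTTPException::
#
#     def example_view(request):
#         if 'id' not in request.args:
#             abort(400)          # raises BadRequest
#         return 'ok'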
#: an exception that is used internally to signal both a key error and a
#: bad request. Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)
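# Illustrative note (added): because of ``BadRequest.wrap`` above, this
# exception can be caught either as a ``KeyError`` or as a 400 error::
#
#     try:
#         raise BadRequestKeyError('missing_field')
#     except KeyError:
#         pass  # still an HTTPException with code == 400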
# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| apache-2.0 |
particl/particl-core | test/functional/rpc_part_signmessage.py | 2 | 2362 | #!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_particl import ParticlTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(ParticlTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self, split=False):
self.add_nodes(self.num_nodes, extra_args=self.extra_args)
self.start_nodes()
def run_test(self):
self.import_genesis_coins_a(self.nodes[0])
message = 'This is just a test message'
self.log.info('test signing with priv_key')
priv_key = '7shnesmjFcQZoxXCsNV55v7hrbQMtBfMNscuBkYrLa1mcJNPbXhU'
address = 'pX9N6S76ZtA5BfsiJmqBbjaEgLMHpt58it'
expected_signature = 'H/ededxXrX9m9uygWRZyfdpEKiKbsHpXZtdWqM1BP+AfDZVV1y0YRcOsGmyKEmDoD7R8Tqa2ptk3XAm71ELGZLo='
signature = self.nodes[0].signmessagewithprivkey(priv_key, message)
assert_equal(expected_signature, signature)
assert(self.nodes[0].verifymessage(address, signature, message))
self.log.info('test signing with an address with wallet')
address = self.nodes[0].getnewaddress()
signature = self.nodes[0].signmessage(address, message)
assert(self.nodes[0].verifymessage(address, signature, message))
self.log.info('test signing with a 256bit address with wallet')
address = self.nodes[0].getnewaddress('', False, False, True)
signature = self.nodes[0].signmessage(address, message)
assert(self.nodes[0].verifymessage(address, signature, message))
self.log.info('test verifying with another address should not work')
other_address = self.nodes[0].getnewaddress()
other_signature = self.nodes[0].signmessage(other_address, message)
assert(not self.nodes[0].verifymessage(other_address, signature, message))
assert(not self.nodes[0].verifymessage(address, other_signature, message))
if __name__ == '__main__':
SignMessagesTest().main()
| mit |
shurihell/testasia | lms/djangoapps/instructor/tests/test_enrollment.py | 27 | 29497 | # -*- coding: utf-8 -*-
"""
Unit tests for instructor.enrollment methods.
"""
import json
import mock
from mock import patch
from abc import ABCMeta
from courseware.models import StudentModule
from django.conf import settings
from django.test import TestCase
from django.utils.translation import get_language
from django.utils.translation import override as override_language
from nose.plugins.attrib import attr
from ccx_keys.locator import CCXLocator
from student.tests.factories import UserFactory
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from lms.djangoapps.ccx.tests.factories import CcxFactory
from student.models import CourseEnrollment, CourseEnrollmentAllowed
from student.roles import CourseCcxCoachRole
from student.tests.factories import (
AdminFactory
)
from instructor.enrollment import (
EmailEnrollmentState,
enroll_email,
get_email_params,
reset_student_attempts,
send_beta_role_email,
unenroll_email,
render_message_to_string,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from submissions import api as sub_api
from student.models import anonymous_id_for_user
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase, TEST_DATA_SPLIT_MODULESTORE
@attr('shard_1')
class TestSettableEnrollmentState(TestCase):
""" Test the basis class for enrollment tests. """
def setUp(self):
super(TestSettableEnrollmentState, self).setUp()
self.course_key = SlashSeparatedCourseKey('Robot', 'fAKE', 'C-%-se-%-ID')
def test_mes_create(self):
"""
Test SettableEnrollmentState creation of user.
"""
mes = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
# enrollment objects
eobjs = mes.create_user(self.course_key)
ees = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(mes, ees)
class TestEnrollmentChangeBase(TestCase):
"""
Test instructor enrollment administration against database effects.
Test methods in derived classes follow a strict format.
`action` is a function which is run
the test will pass if `action` mutates state from `before_ideal` to `after_ideal`
"""
__metaclass__ = ABCMeta
def setUp(self):
super(TestEnrollmentChangeBase, self).setUp()
self.course_key = SlashSeparatedCourseKey('Robot', 'fAKE', 'C-%-se-%-ID')
def _run_state_change_test(self, before_ideal, after_ideal, action):
"""
Runs a state change test.
`before_ideal` and `after_ideal` are SettableEnrollmentState's
`action` is a function which will be run in the middle.
`action` should transition the world from before_ideal to after_ideal
`action` will be supplied the following arguments (None-able arguments)
`email` is an email string
"""
# initialize & check before
print "checking initialization..."
eobjs = before_ideal.create_user(self.course_key)
before = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(before, before_ideal)
# do action
print "running action..."
action(eobjs.email)
# check after
print "checking effects..."
after = EmailEnrollmentState(self.course_key, eobjs.email)
self.assertEqual(after, after_ideal)
@attr('shard_1')
class TestInstructorEnrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.enroll_email """
def test_enroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_again(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_again(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=True)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_enroll_nouser_change_autoenroll(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True,
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=False,
)
action = lambda email: enroll_email(self.course_key, email, auto_enroll=False)
return self._run_state_change_test(before_ideal, after_ideal, action)
@attr('shard_1')
class TestInstructorUnenrollDB(TestEnrollmentChangeBase):
""" Test instructor.enrollment.unenroll_email """
def test_unenroll(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=True,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_notenrolled(self):
before_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=True,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_disallow(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=True,
auto_enroll=True
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
def test_unenroll_norecord(self):
before_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
after_ideal = SettableEnrollmentState(
user=False,
enrollment=False,
allowed=False,
auto_enroll=False
)
action = lambda email: unenroll_email(self.course_key, email)
return self._run_state_change_test(before_ideal, after_ideal, action)
@attr('shard_1')
class TestInstructorEnrollmentStudentModule(SharedModuleStoreTestCase):
""" Test student module manipulations. """
@classmethod
def setUpClass(cls):
super(TestInstructorEnrollmentStudentModule, cls).setUpClass()
cls.course = CourseFactory(
name='fake',
org='course',
run='id',
)
# pylint: disable=no-member
cls.course_key = cls.course.location.course_key
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.parent = ItemFactory(
category="library_content",
parent=cls.course,
publish_item=True,
)
cls.child = ItemFactory(
category="html",
parent=cls.parent,
publish_item=True,
)
cls.unrelated = ItemFactory(
category="html",
parent=cls.course,
publish_item=True,
)
def setUp(self):
super(TestInstructorEnrollmentStudentModule, self).setUp()
self.user = UserFactory()
parent_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
child_state = json.dumps({'attempts': 10, 'whatever': 'things'})
unrelated_state = json.dumps({'attempts': 12, 'brains': 'zombie'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.parent.location,
state=parent_state,
)
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.child.location,
state=child_state,
)
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=self.unrelated.location,
state=unrelated_state,
)
def test_reset_student_attempts(self):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=msk,
state=original_state
)
# lambda to reload the module state from the database
module = lambda: StudentModule.objects.get(student=self.user, course_id=self.course_key, module_state_key=msk)
self.assertEqual(json.loads(module().state)['attempts'], 32)
reset_student_attempts(self.course_key, self.user, msk)
self.assertEqual(json.loads(module().state)['attempts'], 0)
def test_delete_student_attempts(self):
msk = self.course_key.make_usage_key('dummy', 'module')
original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'})
StudentModule.objects.create(
student=self.user,
course_id=self.course_key,
module_state_key=msk,
state=original_state
)
self.assertEqual(
StudentModule.objects.filter(
student=self.user,
course_id=self.course_key,
module_state_key=msk
).count(), 1)
reset_student_attempts(self.course_key, self.user, msk, delete_module=True)
self.assertEqual(
StudentModule.objects.filter(
student=self.user,
course_id=self.course_key,
module_state_key=msk
).count(), 0)
# Disable the score change signal to prevent other components from being
# pulled into tests.
@mock.patch('courseware.module_render.SCORE_CHANGED.send')
def test_delete_submission_scores(self, _lti_mock):
user = UserFactory()
problem_location = self.course_key.make_usage_key('dummy', 'module')
# Create a student module for the user
StudentModule.objects.create(
student=user,
course_id=self.course_key,
module_state_key=problem_location,
state=json.dumps({})
)
# Create a submission and score for the student using the submissions API
student_item = {
'student_id': anonymous_id_for_user(user, self.course_key),
'course_id': self.course_key.to_deprecated_string(),
'item_id': problem_location.to_deprecated_string(),
'item_type': 'openassessment'
}
submission = sub_api.create_submission(student_item, 'test answer')
sub_api.set_score(submission['uuid'], 1, 2)
# Delete student state using the instructor dash
reset_student_attempts(
self.course_key, user, problem_location,
delete_module=True
)
# Verify that the student's scores have been reset in the submissions API
score = sub_api.get_score(student_item)
self.assertIs(score, None)
def get_state(self, location):
"""Reload and grab the module state from the database"""
return StudentModule.objects.get(
student=self.user, course_id=self.course_key, module_state_key=location
).state
def test_reset_student_attempts_children(self):
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(parent_state['attempts'], 32)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 10)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
reset_student_attempts(self.course_key, self.user, self.parent.location)
parent_state = json.loads(self.get_state(self.parent.location))
        self.assertEqual(parent_state['attempts'], 0)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 0)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
def test_delete_submission_scores_attempts_children(self):
parent_state = json.loads(self.get_state(self.parent.location))
self.assertEqual(parent_state['attempts'], 32)
self.assertEqual(parent_state['otherstuff'], 'alsorobots')
child_state = json.loads(self.get_state(self.child.location))
self.assertEqual(child_state['attempts'], 10)
self.assertEqual(child_state['whatever'], 'things')
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
reset_student_attempts(self.course_key, self.user, self.parent.location, delete_module=True)
self.assertRaises(StudentModule.DoesNotExist, self.get_state, self.parent.location)
self.assertRaises(StudentModule.DoesNotExist, self.get_state, self.child.location)
unrelated_state = json.loads(self.get_state(self.unrelated.location))
self.assertEqual(unrelated_state['attempts'], 12)
self.assertEqual(unrelated_state['brains'], 'zombie')
class EnrollmentObjects(object):
"""
Container for enrollment objects.
`email` - student email
`user` - student User object
`cenr` - CourseEnrollment object
`cea` - CourseEnrollmentAllowed object
Any of the objects except email can be None.
"""
def __init__(self, email, user, cenr, cea):
self.email = email
self.user = user
self.cenr = cenr
self.cea = cea
class SettableEnrollmentState(EmailEnrollmentState):
"""
Settable enrollment state.
Used for testing state changes.
SettableEnrollmentState can be constructed and then
a call to create_user will make objects which
correspond to the state represented in the SettableEnrollmentState.
"""
def __init__(self, user=False, enrollment=False, allowed=False, auto_enroll=False): # pylint: disable=super-init-not-called
self.user = user
self.enrollment = enrollment
self.allowed = allowed
self.auto_enroll = auto_enroll
def __eq__(self, other):
return self.to_dict() == other.to_dict()
def __neq__(self, other):
return not self == other
def create_user(self, course_id=None):
"""
Utility method to possibly create and possibly enroll a user.
Creates a state matching the SettableEnrollmentState properties.
Returns an EnrollmentObjects instance containing (
email,
User, (optionally None)
CourseEnrollment, (optionally None)
CourseEnrollmentAllowed, (optionally None)
)
"""
# If self.user is False, then this will just be used to generate an email.
email = "robot_no_user_exists_with_this_email@edx.org"
if self.user:
user = UserFactory()
email = user.email
if self.enrollment:
cenr = CourseEnrollment.enroll(user, course_id)
return EnrollmentObjects(email, user, cenr, None)
else:
return EnrollmentObjects(email, user, None, None)
elif self.allowed:
cea = CourseEnrollmentAllowed.objects.create(
email=email,
course_id=course_id,
auto_enroll=self.auto_enroll,
)
return EnrollmentObjects(email, None, None, cea)
else:
return EnrollmentObjects(email, None, None, None)
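# Illustrative sketch (not part of the original tests):
#     SettableEnrollmentState(user=True, enrollment=True).create_user(course_id)
# returns an EnrollmentObjects with a real User and CourseEnrollment, while
#     SettableEnrollmentState(allowed=True).create_user(course_id)
# yields only a CourseEnrollmentAllowed row for the placeholder email.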
@attr('shard_1')
class TestSendBetaRoleEmail(TestCase):
"""
Test edge cases for `send_beta_role_email`
"""
def setUp(self):
super(TestSendBetaRoleEmail, self).setUp()
self.user = UserFactory.create()
self.email_params = {'course': 'Robot Super Course'}
def test_bad_action(self):
bad_action = 'beta_tester'
error_msg = "Unexpected action received '{}' - expected 'add' or 'remove'".format(bad_action)
with self.assertRaisesRegexp(ValueError, error_msg):
send_beta_role_email(bad_action, self.user, self.email_params)
@attr('shard_1')
class TestGetEmailParamsCCX(SharedModuleStoreTestCase):
"""
Test what URLs the function get_email_params returns for CCX student enrollment.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(TestGetEmailParamsCCX, cls).setUpClass()
cls.course = CourseFactory.create()
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def setUp(self):
super(TestGetEmailParamsCCX, self).setUp()
self.coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(self.coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=self.coach)
self.course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
# Explicitly construct what we expect the course URLs to be
site = settings.SITE_NAME
self.course_url = u'https://{}/courses/{}/'.format(
site,
self.course_key
)
self.course_about_url = self.course_url + 'about'
self.registration_url = u'https://{}/register'.format(site)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_ccx_enrollment_email_params(self):
# For a CCX, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
result = get_email_params(
self.course,
True,
course_key=self.course_key,
display_name=self.ccx.display_name
)
self.assertEqual(result['display_name'], self.ccx.display_name)
self.assertEqual(result['auto_enroll'], True)
self.assertEqual(result['course_about_url'], self.course_about_url)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
@attr('shard_1')
class TestGetEmailParams(SharedModuleStoreTestCase):
"""
Test what URLs the function get_email_params returns under different
production-like conditions.
"""
@classmethod
def setUpClass(cls):
super(TestGetEmailParams, cls).setUpClass()
cls.course = CourseFactory.create()
# Explicitly construct what we expect the course URLs to be
site = settings.SITE_NAME
cls.course_url = u'https://{}/courses/{}/'.format(
site,
cls.course.id.to_deprecated_string()
)
cls.course_about_url = cls.course_url + 'about'
cls.registration_url = u'https://{}/register'.format(site)
def setUp(self):
super(TestGetEmailParams, self).setUp()
def test_normal_params(self):
# For a normal site, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
result = get_email_params(self.course, False)
self.assertEqual(result['auto_enroll'], False)
self.assertEqual(result['course_about_url'], self.course_about_url)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
def test_marketing_params(self):
# For a site with a marketing front end, what do we expect to get for the URLs?
# Also make sure `auto_enroll` is properly passed through.
with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
result = get_email_params(self.course, True)
self.assertEqual(result['auto_enroll'], True)
# We should *not* get a course about url (LMS doesn't know what the marketing site URLs are)
self.assertEqual(result['course_about_url'], None)
self.assertEqual(result['registration_url'], self.registration_url)
self.assertEqual(result['course_url'], self.course_url)
@attr('shard_1')
class TestRenderMessageToString(SharedModuleStoreTestCase):
"""
Test that email templates can be rendered in a language chosen manually.
Test CCX enrollment email.
"""
MODULESTORE = TEST_DATA_SPLIT_MODULESTORE
@classmethod
def setUpClass(cls):
super(TestRenderMessageToString, cls).setUpClass()
cls.course = CourseFactory.create()
cls.subject_template = 'emails/enroll_email_allowedsubject.txt'
cls.message_template = 'emails/enroll_email_allowedmessage.txt'
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def setUp(self):
super(TestRenderMessageToString, self).setUp()
coach = AdminFactory.create()
role = CourseCcxCoachRole(self.course.id)
role.add_users(coach)
self.ccx = CcxFactory(course_id=self.course.id, coach=coach)
self.course_key = CCXLocator.from_course_locator(self.course.id, self.ccx.id)
def get_email_params(self):
"""
Returns a dictionary of parameters used to render an email.
"""
email_params = get_email_params(self.course, True)
email_params["email_address"] = "user@example.com"
email_params["full_name"] = "Jean Reno"
return email_params
def get_email_params_ccx(self):
"""
Returns a dictionary of parameters used to render an email for CCX.
"""
email_params = get_email_params(
self.course,
True,
course_key=self.course_key,
display_name=self.ccx.display_name
)
email_params["email_address"] = "user@example.com"
email_params["full_name"] = "Jean Reno"
return email_params
def get_subject_and_message(self, language):
"""
Returns the subject and message rendered in the specified language.
"""
return render_message_to_string(
self.subject_template,
self.message_template,
self.get_email_params(),
language=language
)
def get_subject_and_message_ccx(self, subject_template, message_template):
"""
Returns the subject and message rendered in the specified language for CCX.
"""
return render_message_to_string(
subject_template,
message_template,
self.get_email_params_ccx()
)
def test_subject_and_message_translation(self):
subject, message = self.get_subject_and_message('fr')
language_after_rendering = get_language()
you_have_been_invited_in_french = u"Vous avez été invité"
self.assertIn(you_have_been_invited_in_french, subject)
self.assertIn(you_have_been_invited_in_french, message)
self.assertEqual(settings.LANGUAGE_CODE, language_after_rendering)
def test_platform_language_is_used_for_logged_in_user(self):
with override_language('zh_CN'): # simulate a user login
subject, message = self.get_subject_and_message(None)
self.assertIn("You have been", subject)
self.assertIn("You have been", message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_enrollment_message_ccx_members(self):
"""
Test enrollment email template renders for CCX.
For EDX members.
"""
subject_template = 'emails/enroll_email_enrolledsubject.txt'
message_template = 'emails/enroll_email_enrolledmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
site = settings.SITE_NAME
course_url = u'https://{}/courses/{}/'.format(
site,
self.course_key
)
self.assertIn(course_url, message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_unenrollment_message_ccx_members(self):
"""
Test unenrollment email template renders for CCX.
For EDX members.
"""
subject_template = 'emails/unenroll_email_subject.txt'
message_template = 'emails/unenroll_email_enrolledmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_enrollment_message_ccx_non_members(self):
"""
Test enrollment email template renders for CCX.
For non EDX members.
"""
subject_template = 'emails/enroll_email_allowedsubject.txt'
message_template = 'emails/enroll_email_allowedmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
site = settings.SITE_NAME
registration_url = u'https://{}/register'.format(site)
self.assertIn(registration_url, message)
@patch.dict('django.conf.settings.FEATURES', {'CUSTOM_COURSES_EDX': True})
def test_render_unenrollment_message_ccx_non_members(self):
"""
Test unenrollment email template renders for CCX.
For non EDX members.
"""
subject_template = 'emails/unenroll_email_subject.txt'
message_template = 'emails/unenroll_email_allowedmessage.txt'
subject, message = self.get_subject_and_message_ccx(subject_template, message_template)
self.assertIn(self.ccx.display_name, subject)
self.assertIn(self.ccx.display_name, message)
| agpl-3.0 |
seanwestfall/django | django/contrib/sites/managers.py | 472 | 2132 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.db import models
class CurrentSiteManager(models.Manager):
"Use this to limit objects to those associated with the current site."
use_in_migrations = True
def __init__(self, field_name=None):
super(CurrentSiteManager, self).__init__()
self.__field_name = field_name
def check(self, **kwargs):
errors = super(CurrentSiteManager, self).check(**kwargs)
errors.extend(self._check_field_name())
return errors
def _check_field_name(self):
field_name = self._get_field_name()
try:
field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
return [
checks.Error(
"CurrentSiteManager could not find a field named '%s'." % field_name,
hint=None,
obj=self,
id='sites.E001',
)
]
if not isinstance(field, (models.ForeignKey, models.ManyToManyField)):
return [
checks.Error(
"CurrentSiteManager cannot use '%s.%s' as it is not a ForeignKey or ManyToManyField." % (
self.model._meta.object_name, field_name
),
hint=None,
obj=self,
id='sites.E002',
)
]
return []
def _get_field_name(self):
""" Return self.__field_name or 'site' or 'sites'. """
if not self.__field_name:
try:
self.model._meta.get_field('site')
except FieldDoesNotExist:
self.__field_name = 'sites'
else:
self.__field_name = 'site'
return self.__field_name
def get_queryset(self):
return super(CurrentSiteManager, self).get_queryset().filter(
**{self._get_field_name() + '__id': settings.SITE_ID})
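# Usage sketch (illustrative model, not part of this module): attaching
#     on_site = CurrentSiteManager()
# to a model with a `site = ForeignKey('sites.Site')` field makes
# Model.on_site.all() equivalent to filtering on site__id=settings.SITE_ID.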
| bsd-3-clause |
skbkontur/Diamond | src/collectors/mountstats/mountstats.py | 28 | 8068 | # coding=utf-8
"""
The function of MountStatsCollector is to parse the detailed per-mount NFS
performance statistics provided by `/proc/self/mountstats` (reads, writes,
remote procedure call count/latency, etc.) and provide counters to Diamond.
Filesystems may be included/excluded using a regular expression filter,
like the existing disk check collectors.
#### Dependencies
* /proc/self/mountstats
"""
import os
import re
import subprocess
import diamond.collector
from diamond.collector import str_to_bool
class MountStatsCollector(diamond.collector.Collector):
"""Diamond collector for statistics from /proc/self/mountstats
"""
BYTES_MAP = ['normalreadbytes', 'normalwritebytes', 'directreadbytes',
'directwritebytes', 'serverreadbytes', 'serverwritebytes']
EVENTS_MAP = ['inoderevalidates', 'dentryrevalidates',
'datainvalidates', 'attrinvalidates', 'syncinodes',
'vfsopen', 'vfslookup', 'vfspermission', 'vfsreadpage',
'vfsreadpages', 'vfswritepage', 'vfswritepages',
'vfsreaddir', 'vfsflush', 'vfsfsync', 'vfsflock',
'vfsrelease', 'setattrtrunc', 'extendwrite',
'sillyrenames', 'shortreads', 'shortwrites', 'delay']
XPRT_MAP = {'rdma': ['port', 'bind_count', 'connect_count',
'connect_time', 'idle_time', 'rpcsends',
'rpcreceives', 'badxids', 'backlogutil',
'read_chunks', 'write_chunks', 'reply_chunks',
'total_rdma_req', 'total_dma_rep', 'pullup',
'fixup', 'hardway', 'failed_marshal', 'bad_reply'],
'tcp': ['port', 'bind_count', 'connect_count',
'connect_time', 'idle_time', 'rpcsends',
'rpcreceives', 'badxids', 'backlogutil'],
'udp': ['port', 'bind_count', 'rpcsends', 'rpcreceives',
'badxids', 'backlogutil']}
RPCS_MAP = ['ACCESS', 'CLOSE', 'COMMIT', 'CREATE', 'DELEGRETURN',
'FSINFO', 'FSSTAT', 'FS_LOCATIONS', 'GETACL', 'GETATTR',
'LINK', 'LOCK', 'LOCKT', 'LOCKU', 'LOOKUP', 'LOOKUP_ROOT',
'MKDIR', 'MKNOD', 'NULL', 'OPEN', 'OPEN_CONFIRM',
'OPEN_DOWNGRADE', 'OPEN_NOATTR', 'PATHCONF', 'READ',
'READDIR', 'READDIRPLUS', 'READLINK', 'REMOVE', 'RENAME',
'RENEW', 'RMDIR', 'SERVER_CAPS', 'SETACL', 'SETATTR',
'SETCLIENTID', 'SETCLIENTID_CONFIRM', 'STATFS', 'SYMLINK',
'WRITE']
MOUNTSTATS = '/proc/self/mountstats'
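# Illustrative excerpt of /proc/self/mountstats as parsed in collect() below
# (values invented; real files vary by kernel and NFS version):
#
#   device fs1.example.com:/export mounted on /mnt/nfs with fstype nfs statvers=1.0
#       bytes: 0 0 0 0 1153 0 1 0
#       events: 4 100 0 0 2 ...
#       xprt: tcp 801 0 1 0 0 549 549 0 1
#
# collect() keys off tokens[0] of each line: 'device' starts a new mount
# (tokens[4] is the mount point, used as the metric path prefix), while
# 'bytes:', 'events:' and 'xprt:' map positionally onto the *_MAP lists above.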
def process_config(self):
super(MountStatsCollector, self).process_config()
self.exclude_filters = self.config['exclude_filters']
if isinstance(self.exclude_filters, basestring):
self.exclude_filters = [self.exclude_filters]
if len(self.exclude_filters) > 0:
self.exclude_reg = re.compile('|'.join(self.exclude_filters))
else:
self.exclude_reg = None
self.include_filters = self.config['include_filters']
if isinstance(self.include_filters, basestring):
self.include_filters = [self.include_filters]
if len(self.include_filters) > 0:
self.include_reg = re.compile('|'.join(self.include_filters))
else:
self.include_reg = None
def get_default_config_help(self):
config_help = super(MountStatsCollector,
self).get_default_config_help()
config_help.update({
'exclude_filters':
"A list of regex patterns. Any filesystem" +
" matching any of these patterns will be excluded from" +
" mount stats metrics collection.",
'include_filters':
"A list of regex patterns. Any filesystem" +
" matching any of these patterns will be included from" +
" mount stats metrics collection.",
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
config = super(MountStatsCollector, self).get_default_config()
config.update({
'exclude_filters': [],
'include_filters': [],
'path': 'mountstats',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
})
return config
def collect(self):
"""Collect statistics from /proc/self/mountstats.
Currently, we do fairly naive parsing and do not actually check
the statvers value returned by mountstats.
"""
if str_to_bool(self.config['use_sudo']):
if not os.access(self.config['sudo_cmd'], os.X_OK):
self.log.error("Cannot find or exec %s"
% self.config['sudo_cmd'])
return None
command = [self.config['sudo_cmd'], '/bin/cat', self.MOUNTSTATS]
p = subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0][:-1]
lines = p.split("\n")
else:
if not os.access(self.MOUNTSTATS, os.R_OK):
self.log.error("Cannot read path %s" % self.MOUNTSTATS)
return None
f = open(self.MOUNTSTATS)
lines = f.readlines()
f.close()
path = None
for line in lines:
tokens = line.split()
if len(tokens) == 0:
continue
if tokens[0] == 'device':
path = tokens[4]
skip = False
if self.exclude_reg:
skip = self.exclude_reg.match(path)
if self.include_reg:
skip = not self.include_reg.match(path)
if skip:
self.log.debug("Ignoring %s", path)
else:
self.log.debug("Keeping %s", path)
path = path.replace('.', '_')
path = path.replace('/', '_')
elif skip:
# If we are in a skip state, don't pay any attention to
# anything that isn't the next device line
continue
elif tokens[0] == 'events:':
for i in range(0, len(self.EVENTS_MAP)):
metric_name = "%s.events.%s" % (path, self.EVENTS_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'bytes:':
for i in range(0, len(self.BYTES_MAP)):
metric_name = "%s.bytes.%s" % (path, self.BYTES_MAP[i])
metric_value = long(tokens[i + 1])
self.publish_counter(metric_name, metric_value)
elif tokens[0] == 'xprt:':
proto = tokens[1]
if not self.XPRT_MAP[proto]:
self.log.error("Unknown protocol %s", proto)
continue
for i in range(0, len(self.XPRT_MAP[proto])):
metric_name = "%s.xprt.%s.%s" % (path, proto,
self.XPRT_MAP[proto][i])
metric_value = long(tokens[i + 2])
self.publish_counter(metric_name, metric_value)
elif tokens[0][:-1] in self.RPCS_MAP:
rpc = tokens[0][:-1]
ops = long(tokens[1])
rtt = long(tokens[7])
exe = long(tokens[8])
metric_fmt = "%s.rpc.%s.%s"
ops_name = metric_fmt % (path, rpc.lower(), 'ops')
rtt_name = metric_fmt % (path, rpc.lower(), 'rtt')
exe_name = metric_fmt % (path, rpc.lower(), 'exe')
self.publish_counter(ops_name, ops)
self.publish_counter(rtt_name, rtt)
self.publish_counter(exe_name, exe)
| mit |
ayushagrawal288/zamboni | mkt/developers/tests/test_cron.py | 13 | 6326 | # -*- coding: utf-8 -*-
import datetime
import mock
from nose.tools import eq_
import mkt
from mkt.developers.cron import (_flag_rereview_adult, exclude_new_region,
process_iarc_changes, send_new_region_emails)
from mkt.developers.models import ActivityLog
from mkt.site.tests import TestCase, user_factory, WebappTestCase
from mkt.site.utils import app_factory
from mkt.webapps.models import IARCInfo, RatingDescriptors, RatingInteractives
class TestSendNewRegionEmails(WebappTestCase):
@mock.patch('mkt.developers.cron._region_email')
def test_called(self, _region_email_mock):
eq_(self.app.enable_new_regions, True)
send_new_region_emails([mkt.regions.GBR])
eq_(list(_region_email_mock.call_args_list[0][0][0]), [self.app.id])
@mock.patch('mkt.developers.cron._region_email')
def test_not_called_with_exclusions(self, _region_email_mock):
self.app.addonexcludedregion.create(region=mkt.regions.GBR.id)
send_new_region_emails([mkt.regions.GBR])
eq_(list(_region_email_mock.call_args_list[0][0][0]), [])
@mock.patch('mkt.developers.cron._region_email')
def test_not_called_with_enable_new_regions_false(self,
_region_email_mock):
"""Check enable_new_regions is False by default."""
self.app.update(enable_new_regions=False)
send_new_region_emails([mkt.regions.GBR])
eq_(list(_region_email_mock.call_args_list[0][0][0]), [])
class TestExcludeNewRegion(WebappTestCase):
@mock.patch('mkt.developers.cron._region_exclude')
def test_not_called_enable_new_regions_true(self, _region_exclude_mock):
eq_(self.app.enable_new_regions, True)
exclude_new_region([mkt.regions.GBR])
eq_(list(_region_exclude_mock.call_args_list[0][0][0]), [])
@mock.patch('mkt.developers.cron._region_exclude')
def test_not_called_with_ordinary_exclusions(self, _region_exclude_mock):
self.app.addonexcludedregion.create(region=mkt.regions.GBR.id)
exclude_new_region([mkt.regions.GBR])
eq_(list(_region_exclude_mock.call_args_list[0][0][0]), [])
@mock.patch('mkt.developers.cron._region_exclude')
def test_called_with_enable_new_regions_false(self, _region_exclude_mock):
# Check enable_new_regions is False by default.
self.app.update(enable_new_regions=False)
exclude_new_region([mkt.regions.GBR])
eq_(list(_region_exclude_mock.call_args_list[0][0][0]), [self.app.id])
class TestIARCChangesCron(TestCase):
@mock.patch('lib.iarc.utils.render_xml')
def test_no_date(self, _render):
process_iarc_changes()
_render.assert_called_with('get_rating_changes.xml', {
'date_from': datetime.date.today() - datetime.timedelta(days=1),
'date_to': datetime.date.today(),
})
@mock.patch('lib.iarc.utils.render_xml')
def test_with_date(self, _render):
date = datetime.date(2001, 1, 11)
process_iarc_changes(date.strftime('%Y-%m-%d'))
_render.assert_called_with('get_rating_changes.xml', {
'date_from': date - datetime.timedelta(days=1),
'date_to': date,
})
def test_processing(self):
"""
The mock client always returns the same data. Set up the app so it
matches the submission ID and verify the data is saved as expected.
"""
mkt.set_user(user_factory())
app = app_factory()
IARCInfo.objects.create(addon=app, submission_id=52,
security_code='FZ32CU8')
app.set_descriptors([
'has_classind_violence',
'has_esrb_strong_lang',
'has_pegi_language', 'has_pegi_online',
'has_usk_lang',
])
app.set_interactives([])
app.set_content_ratings({
mkt.ratingsbodies.CLASSIND: mkt.ratingsbodies.CLASSIND_L
})
process_iarc_changes()
app = app.reload()
# Check ratings. CLASSIND should get updated.
cr = app.content_ratings.get(
ratings_body=mkt.ratingsbodies.CLASSIND.id)
eq_(cr.rating, mkt.ratingsbodies.CLASSIND_14.id)
cr = app.content_ratings.get(ratings_body=mkt.ratingsbodies.ESRB.id)
eq_(cr.rating, mkt.ratingsbodies.ESRB_M.id)
assert ActivityLog.objects.filter(
action=mkt.LOG.CONTENT_RATING_CHANGED.id).count()
# Check descriptors.
rd = RatingDescriptors.objects.get(addon=app)
self.assertSetEqual(rd.to_keys(), [
'has_esrb_strong_lang',
'has_classind_lang',
'has_pegi_lang', 'has_pegi_online',
'has_usk_lang',
])
# Check interactives.
ri = RatingInteractives.objects.get(addon=app)
self.assertSetEqual(ri.to_keys(), [
'has_shares_info', 'has_shares_location', 'has_digital_purchases',
'has_users_interact'
])
def test_rereview_flag_adult(self):
mkt.set_user(user_factory())
app = app_factory()
app.set_content_ratings({
mkt.ratingsbodies.ESRB: mkt.ratingsbodies.ESRB_E,
mkt.ratingsbodies.CLASSIND: mkt.ratingsbodies.CLASSIND_18,
})
_flag_rereview_adult(app, mkt.ratingsbodies.ESRB,
mkt.ratingsbodies.ESRB_T)
assert not app.rereviewqueue_set.count()
assert not ActivityLog.objects.filter(
action=mkt.LOG.CONTENT_RATING_TO_ADULT.id).exists()
# Adult should get flagged to rereview.
_flag_rereview_adult(app, mkt.ratingsbodies.ESRB,
mkt.ratingsbodies.ESRB_A)
eq_(app.rereviewqueue_set.count(), 1)
eq_(ActivityLog.objects.filter(
action=mkt.LOG.CONTENT_RATING_TO_ADULT.id).count(), 1)
# Test that nothing changes if the rating stays the same (adult).
app.set_content_ratings({
mkt.ratingsbodies.ESRB: mkt.ratingsbodies.ESRB_A,
})
_flag_rereview_adult(app, mkt.ratingsbodies.ESRB,
mkt.ratingsbodies.ESRB_A)
eq_(app.rereviewqueue_set.count(), 1)
eq_(ActivityLog.objects.filter(
action=mkt.LOG.CONTENT_RATING_TO_ADULT.id).count(), 1)
| bsd-3-clause |
ESRC-CDRC/ckan-datapusher-service | datapusher/jobs.py | 1 | 14146 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import urllib2
import socket
import requests
import urlparse
import itertools
import datetime
import locale
import pprint
import logging
import decimal
import hashlib
import cStringIO
import time
import ssl
import messytables
from slugify import slugify
import ckanserviceprovider.job as job
import ckanserviceprovider.util as util
from ckanserviceprovider import web
if not locale.getlocale()[0]:
locale.setlocale(locale.LC_ALL, '')
MAX_CONTENT_LENGTH = web.app.config.get('MAX_CONTENT_LENGTH') or 10485760
DOWNLOAD_TIMEOUT = 30
_TYPE_MAPPING = {
'String': 'text',
# 'int' may not be big enough,
# and type detection may not realize it needs to be big
'Integer': 'numeric',
'Decimal': 'numeric',
'DateUtil': 'timestamp'
}
_TYPES = [messytables.StringType, messytables.DecimalType,
messytables.IntegerType, messytables.DateUtilType]
TYPE_MAPPING = web.app.config.get('TYPE_MAPPING', _TYPE_MAPPING)
TYPES = web.app.config.get('TYPES', _TYPES)
DATASTORE_URLS = {
'datastore_delete': '{ckan_url}/api/action/datastore_delete',
'resource_update': '{ckan_url}/api/action/resource_update'
}
class HTTPError(util.JobError):
"""Exception that's raised if a job fails due to an HTTP problem."""
def __init__(self, message, status_code, request_url, response):
"""Initialise a new HTTPError.
:param message: A human-readable error message
:type message: string
:param status_code: The status code of the errored HTTP response,
e.g. 500
:type status_code: int
:param request_url: The URL that was requested
:type request_url: string
:param response: The body of the errored HTTP response as unicode
(if you have a requests.Response object then response.text will
give you this)
:type response: unicode
"""
super(HTTPError, self).__init__(message)
self.status_code = status_code
self.request_url = request_url
self.response = response
def as_dict(self):
"""Return a JSON-serializable dictionary representation of this error.
Suitable for ckanserviceprovider to return to the client site as the
value for the "error" key in the job dict.
"""
if self.response and len(self.response) > 200:
response = self.response[:200] + '...'
else:
response = self.response
return {
"message": self.message,
"HTTP status code": self.status_code,
"Requested URL": self.request_url,
"Response": response,
}
def get_url(action, ckan_url):
"""
Get url for ckan action
"""
if not urlparse.urlsplit(ckan_url).scheme:
ckan_url = 'http://' + ckan_url.lstrip('/')
ckan_url = ckan_url.rstrip('/')
return '{ckan_url}/api/3/action/{action}'.format(
ckan_url=ckan_url, action=action)
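# Minimal usage sketch (illustrative): get_url('datastore_create', 'demo.ckan.org')
# returns 'http://demo.ckan.org/api/3/action/datastore_create'; the http://
# scheme is prepended only when the configured ckan_url lacks one.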
def check_response(response, request_url, who, good_status=(201, 200), ignore_no_success=False):
"""
Checks the response and raises exceptions if something went terribly wrong
:param who: A short name that indicates where the error occurred
(for example "CKAN")
:param good_status: Status codes that should not raise an exception
"""
if not response.status_code:
raise HTTPError(
'DataPusher received an HTTP response with no status code',
status_code=None, request_url=request_url, response=response.text)
message = '{who} bad response. Status code: {code} {reason}. At: {url}.'
try:
if response.status_code not in good_status:
json_response = response.json()
if not ignore_no_success or json_response.get('success'):
try:
message = json_response["error"]["message"]
except Exception:
message = message.format(
who=who, code=response.status_code,
reason=response.reason, url=request_url)
raise HTTPError(
message, status_code=response.status_code,
request_url=request_url, response=response.text)
except ValueError as err:
message = message.format(
who=who, code=response.status_code, reason=response.reason,
url=request_url, resp=response.text[:200])
raise HTTPError(
message, status_code=response.status_code, request_url=request_url,
response=response.text)
def chunky(iterable, n):
"""
Generates chunks of data that can be loaded into CKAN
:param n: Size of each chunk
:type n: int
"""
it = iter(iterable)
item = list(itertools.islice(it, n))
while item:
yield item
item = list(itertools.islice(it, n))
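# Hedged example (not part of the original module): list(chunky(range(5), 2))
# yields [[0, 1], [2, 3], [4]]; push_to_datastore() below relies on this to
# send rows to the DataStore in batches of 250.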
class DatastoreEncoder(json.JSONEncoder):
# Custom JSON encoder
def default(self, obj):
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return str(obj)
return json.JSONEncoder.default(self, obj)
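# Usage sketch (illustrative): json.dumps({'when': datetime.datetime.now()},
# cls=DatastoreEncoder) emits the timestamp as an ISO-8601 string rather than
# raising TypeError, and Decimal values are serialized as plain strings.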
def delete_datastore_resource(resource_id, api_key, ckan_url):
try:
delete_url = get_url('datastore_delete', ckan_url)
response = requests.post(delete_url,
data=json.dumps({'id': resource_id,
'force': True}),
headers={'Content-Type': 'application/json',
'Authorization': api_key},
verify=False
)
check_response(response, delete_url, 'CKAN',
good_status=(201, 200, 404), ignore_no_success=True)
except requests.exceptions.RequestException:
raise util.JobError('Deleting existing datastore failed.')
def send_resource_to_datastore(resource, headers, records, api_key, ckan_url):
"""
Stores records in CKAN datastore
"""
request = {'resource_id': resource['id'],
'fields': headers,
'force': True,
'records': records}
name = resource.get('name')
url = get_url('datastore_create', ckan_url)
r = requests.post(url,
data=json.dumps(request, cls=DatastoreEncoder),
headers={'Content-Type': 'application/json',
'Authorization': api_key},
verify=False
)
check_response(r, url, 'CKAN DataStore')
def update_resource(resource, api_key, ckan_url):
"""
Update the resource in CKAN, marking its url_type as 'datapusher'
"""
resource['url_type'] = 'datapusher'
url = get_url('resource_update', ckan_url)
r = requests.post(
url,
data=json.dumps(resource),
headers={'Content-Type': 'application/json',
'Authorization': api_key},
verify=False
)
check_response(r, url, 'CKAN')
def get_resource(resource_id, ckan_url, api_key):
"""
Gets available information about the resource from CKAN
"""
url = get_url('resource_show', ckan_url)
r = requests.post(url,
data=json.dumps({'id': resource_id}),
headers={'Content-Type': 'application/json',
'Authorization': api_key},
verify=False
)
check_response(r, url, 'CKAN')
return r.json()['result']
def validate_input(input):
# Especially validate metadata, which is provided by the user
if 'metadata' not in input:
raise util.JobError('Metadata missing')
data = input['metadata']
if 'resource_id' not in data:
raise util.JobError('No id provided.')
if 'ckan_url' not in data:
raise util.JobError('No ckan_url provided.')
if not input.get('api_key'):
raise util.JobError('No CKAN API key provided')
def resource_conflict(job1, job2):
""" Jobs operate on the same resource should be sequentially run."""
if job1['resource_id'] == job2['resource_id']:
return True
else:
return False
@job.async(check_conflict=resource_conflict)
def push_to_datastore(task_id, input, dry_run=False):
'''Download and parse a resource, then push its data into CKAN's DataStore.
An asynchronous job that gets a resource from CKAN, downloads the
resource's data file and, if the data file has changed since last time,
parses the data and posts it into CKAN's DataStore.
:param dry_run: Fetch and parse the data file but don't actually post the
data to the DataStore, instead return the data headers and rows that
would have been posted.
:type dry_run: boolean
'''
handler = util.StoringHandler(task_id, input)
logger = logging.getLogger(task_id)
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
validate_input(input)
data = input['metadata']
ckan_url = data['ckan_url']
resource_id = data['resource_id']
api_key = input.get('api_key')
limit = int(data.get('limit', -1))
try:
resource = get_resource(resource_id, ckan_url, api_key)
except util.JobError:
# try again in 5 seconds just in case CKAN is slow at adding the resource
time.sleep(5)
resource = get_resource(resource_id, ckan_url, api_key)
# fetch the resource data
logger.info('Fetching from: {0}'.format(resource.get('url')))
try:
request = urllib2.Request(resource.get('url'))
if resource.get('url_type') == 'upload':
# If this is an uploaded file to CKAN, authenticate the request,
# otherwise we won't get file from private resources
request.add_header('Authorization', api_key)
#response = urllib2.urlopen(request, timeout=DOWNLOAD_TIMEOUT)
context = ssl._create_unverified_context()
response = urllib2.urlopen(request, timeout=DOWNLOAD_TIMEOUT, context=context)
except urllib2.HTTPError as e:
raise HTTPError(
"DataPusher received a bad HTTP response when trying to download "
"the data file", status_code=e.code,
request_url=resource.get('url'), response=e.read())
except urllib2.URLError as e:
if isinstance(e.reason, socket.timeout):
raise util.JobError('Connection timed out after %ss' %
DOWNLOAD_TIMEOUT)
else:
raise HTTPError(
message=str(e.reason), status_code=None,
request_url=resource.get('url'), response=None)
cl = response.info().getheader('content-length')
if cl and int(cl) > MAX_CONTENT_LENGTH:
raise util.JobError(
'Resource too large to download: {cl} > max ({max_cl}).'.format(
cl=cl, max_cl=MAX_CONTENT_LENGTH))
ct = response.info().getheader('content-type').split(';', 1)[0]
f = cStringIO.StringIO(response.read())
file_hash = hashlib.md5(f.read()).hexdigest()
f.seek(0)
if (resource.get('hash') == file_hash
and not data.get('ignore_hash')):
logger.info("The file hash hasn't changed: {hash}.".format(
hash=file_hash))
return
resource['hash'] = file_hash
try:
table_set = messytables.any_tableset(f, mimetype=ct, extension=ct)
except messytables.ReadError as e:
# try again using the resource's declared format
f.seek(0)
try:
format = resource.get('format')
table_set = messytables.any_tableset(f, mimetype=format, extension=format)
except:
raise util.JobError(e)
row_set = table_set.tables.pop()
offset, headers = messytables.headers_guess(row_set.sample)
row_set.register_processor(messytables.headers_processor(headers))
row_set.register_processor(messytables.offset_processor(offset + 1))
types = messytables.type_guess(row_set.sample, types=TYPES, strict=True)
row_set.register_processor(messytables.types_processor(types))
headers = [header.strip() for header in headers if header.strip()]
headers_set = set(headers)
def row_iterator(limit=-1):
count = None if limit < 0 else limit
for row in row_set:
data_row = {}
if count is not None and count <= 0:
return
elif count is not None:
count -= 1
for index, cell in enumerate(row):
column_name = cell.column.strip()
if column_name not in headers_set:
continue
data_row[column_name] = cell.value
yield data_row
result = row_iterator(limit)
'''
Delete the existing datastore resource before proceeding. Otherwise
'datastore_create' will append to the existing datastore. And if
the fields have significantly changed, it may also fail.
'''
logger.info('Deleting "{res_id}" from datastore.'.format(
res_id=resource_id))
delete_datastore_resource(resource_id, api_key, ckan_url)
headers_dicts = [dict(id=field[0], type=TYPE_MAPPING[str(field[1])])
for field in zip(headers, types)]
logger.info('Determined headers and types: {headers}'.format(
headers=headers_dicts))
if dry_run:
return headers_dicts, result
if limit > 0:
logger.info('Uploading only the first {limit} entries.'.format(
limit=limit))
count = 0
for i, records in enumerate(chunky(result, 250)):
count += len(records)
logger.info('Saving chunk {number}'.format(number=i))
send_resource_to_datastore(resource, headers_dicts,
records, api_key, ckan_url)
logger.info('Successfully pushed {n} entries to "{res_id}".'.format(
n=count, res_id=resource_id))
if data.get('set_url_type', False):
update_resource(resource, api_key, ckan_url)
| agpl-3.0 |
grepme/CMPUT410Lab01 | virt_env/virt1/lib/python2.7/site-packages/SQLAlchemy-0.9.8-py2.7-linux-x86_64.egg/sqlalchemy/util/langhelpers.py | 32 | 37539 | # util/langhelpers.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Routines to help with the creation, loading and introspection of
modules, classes, hierarchies, attributes, functions, and methods.
"""
import itertools
import inspect
import operator
import re
import sys
import types
import warnings
from functools import update_wrapper
from .. import exc
import hashlib
from . import compat
from . import _collections
def md5_hex(x):
if compat.py3k:
x = x.encode('utf-8')
m = hashlib.md5()
m.update(x)
return m.hexdigest()
class safe_reraise(object):
"""Reraise an exception after invoking some
handler code.
Stores the existing exception info before
invoking so that it is maintained across a potential
coroutine context switch.
e.g.::
try:
sess.commit()
except:
with safe_reraise():
sess.rollback()
"""
def __enter__(self):
self._exc_info = sys.exc_info()
def __exit__(self, type_, value, traceback):
# see #2703 for notes
if type_ is None:
exc_type, exc_value, exc_tb = self._exc_info
self._exc_info = None # remove potential circular references
compat.reraise(exc_type, exc_value, exc_tb)
else:
self._exc_info = None # remove potential circular references
compat.reraise(type_, value, traceback)
def decode_slice(slc):
"""decode a slice object as sent to __getitem__.
takes into account the 2.5 __index__() method, basically.
"""
ret = []
for x in slc.start, slc.stop, slc.step:
if hasattr(x, '__index__'):
x = x.__index__()
ret.append(x)
return tuple(ret)
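# Doctest-style sketch (illustrative): decode_slice(slice(1, 10, 2)) returns
# (1, 10, 2); members defining __index__() (e.g. numpy integer scalars) are
# coerced to plain ints first.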
def _unique_symbols(used, *bases):
used = set(used)
for base in bases:
pool = itertools.chain((base,),
compat.itertools_imap(lambda i: base + str(i),
range(1000)))
for sym in pool:
if sym not in used:
used.add(sym)
yield sym
break
else:
raise NameError("exhausted namespace for symbol base %s" % base)
def decorator(target):
"""A signature-matching decorator factory."""
def decorate(fn):
if not inspect.isfunction(fn):
raise Exception("not a decoratable function")
spec = compat.inspect_getfullargspec(fn)
names = tuple(spec[0]) + spec[1:3] + (fn.__name__,)
targ_name, fn_name = _unique_symbols(names, 'target', 'fn')
metadata = dict(target=targ_name, fn=fn_name)
metadata.update(format_argspec_plus(spec, grouped=False))
metadata['name'] = fn.__name__
code = """\
def %(name)s(%(args)s):
return %(target)s(%(fn)s, %(apply_kw)s)
""" % metadata
decorated = _exec_code_in_env(code,
{targ_name: target, fn_name: fn},
fn.__name__)
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
decorated.__wrapped__ = fn
return update_wrapper(decorated, fn)
return update_wrapper(decorate, target)
def _exec_code_in_env(code, env, fn_name):
exec(code, env)
return env[fn_name]
def public_factory(target, location):
"""Produce a wrapping function for the given cls or classmethod.
Rationale here is so that the __init__ method of the
class can serve as documentation for the function.
"""
if isinstance(target, type):
fn = target.__init__
callable_ = target
doc = "Construct a new :class:`.%s` object. \n\n"\
"This constructor is mirrored as a public API function; see :func:`~%s` "\
"for a full usage and argument description." % (
target.__name__, location, )
else:
fn = callable_ = target
doc = "This function is mirrored; see :func:`~%s` "\
"for a description of arguments." % location
location_name = location.split(".")[-1]
spec = compat.inspect_getfullargspec(fn)
del spec[0][0]
metadata = format_argspec_plus(spec, grouped=False)
metadata['name'] = location_name
code = """\
def %(name)s(%(args)s):
return cls(%(apply_kw)s)
""" % metadata
env = {'cls': callable_, 'symbol': symbol}
exec(code, env)
decorated = env[location_name]
decorated.__doc__ = fn.__doc__
if compat.py2k or hasattr(fn, '__func__'):
fn.__func__.__doc__ = doc
else:
fn.__doc__ = doc
return decorated
class PluginLoader(object):
def __init__(self, group, auto_fn=None):
self.group = group
self.impls = {}
self.auto_fn = auto_fn
def load(self, name):
if name in self.impls:
return self.impls[name]()
if self.auto_fn:
loader = self.auto_fn(name)
if loader:
self.impls[name] = loader
return loader()
try:
import pkg_resources
except ImportError:
pass
else:
for impl in pkg_resources.iter_entry_points(
self.group, name):
self.impls[name] = impl.load
return impl.load()
raise exc.NoSuchModuleError(
"Can't load plugin: %s:%s" %
(self.group, name))
def register(self, name, modulepath, objname):
def load():
mod = compat.import_(modulepath)
for token in modulepath.split(".")[1:]:
mod = getattr(mod, token)
return getattr(mod, objname)
self.impls[name] = load
def get_cls_kwargs(cls, _set=None):
"""Return the full set of inherited kwargs for the given `cls`.
Probes a class's __init__ method, collecting all named arguments. If the
__init__ defines a \**kwargs catch-all, then the constructor is presumed
to pass along unrecognized keywords to its base classes, and the
collection process is repeated recursively on each of the bases.
Uses a subset of inspect.getargspec() to cut down on method overhead.
No anonymous tuple arguments, please!
"""
toplevel = _set is None
if toplevel:
_set = set()
ctr = cls.__dict__.get('__init__', False)
has_init = ctr and isinstance(ctr, types.FunctionType) and \
isinstance(ctr.__code__, types.CodeType)
if has_init:
names, has_kw = inspect_func_args(ctr)
_set.update(names)
if not has_kw and not toplevel:
return None
if not has_init or has_kw:
for c in cls.__bases__:
if get_cls_kwargs(c, _set) is None:
break
_set.discard('self')
return _set
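# Hedged example (class names invented):
#     class Base(object):
#         def __init__(self, a, **kw): pass
#     class Sub(Base):
#         def __init__(self, b, **kw): pass
# get_cls_kwargs(Sub) returns set(['a', 'b']), because each **kw catch-all
# sends the probe on to the base classes.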
try:
# TODO: who doesn't have this constant?
from inspect import CO_VARKEYWORDS
def inspect_func_args(fn):
co = fn.__code__
nargs = co.co_argcount
names = co.co_varnames
args = list(names[:nargs])
has_kw = bool(co.co_flags & CO_VARKEYWORDS)
return args, has_kw
except ImportError:
def inspect_func_args(fn):
names, _, has_kw, _ = inspect.getargspec(fn)
return names, bool(has_kw)
def get_func_kwargs(func):
"""Return the set of legal kwargs for the given `func`.
Uses getargspec so is safe to call for methods, functions,
etc.
"""
return compat.inspect_getargspec(func)[0]
def get_callable_argspec(fn, no_self=False, _is_init=False):
"""Return the argument signature for any callable.
All pure-Python callables are accepted, including
functions, methods, classes, objects with __call__;
builtins and other edge cases like functools.partial() objects
raise a TypeError.
"""
if inspect.isbuiltin(fn):
raise TypeError("Can't inspect builtin: %s" % fn)
elif inspect.isfunction(fn):
if _is_init and no_self:
spec = compat.inspect_getargspec(fn)
return compat.ArgSpec(spec.args[1:], spec.varargs,
spec.keywords, spec.defaults)
else:
return compat.inspect_getargspec(fn)
elif inspect.ismethod(fn):
if no_self and (_is_init or fn.__self__):
spec = compat.inspect_getargspec(fn.__func__)
return compat.ArgSpec(spec.args[1:], spec.varargs,
spec.keywords, spec.defaults)
else:
return compat.inspect_getargspec(fn.__func__)
elif inspect.isclass(fn):
return get_callable_argspec(
fn.__init__, no_self=no_self, _is_init=True)
elif hasattr(fn, '__func__'):
return compat.inspect_getargspec(fn.__func__)
elif hasattr(fn, '__call__'):
if inspect.ismethod(fn.__call__):
return get_callable_argspec(fn.__call__, no_self=no_self)
else:
raise TypeError("Can't inspect callable: %s" % fn)
else:
raise TypeError("Can't inspect callable: %s" % fn)
def format_argspec_plus(fn, grouped=True):
"""Returns a dictionary of formatted, introspected function arguments.
An enhanced variant of inspect.formatargspec to support code generation.
fn
An inspectable callable or tuple of inspect getargspec() results.
grouped
Defaults to True; include (parens, around, argument) lists
Returns:
args
Full inspect.formatargspec for fn
self_arg
The name of the first positional argument, varargs[0], or None
if the function defines no positional arguments.
apply_pos
args, re-written in calling rather than receiving syntax. Arguments are
passed positionally.
apply_kw
Like apply_pos, except keyword-ish args are passed as keywords.
Example::
>>> format_argspec_plus(lambda self, a, b, c=3, **d: 123)
{'args': '(self, a, b, c=3, **d)',
'self_arg': 'self',
'apply_kw': '(self, a, b, c=c, **d)',
'apply_pos': '(self, a, b, c, **d)'}
"""
if compat.callable(fn):
spec = compat.inspect_getfullargspec(fn)
else:
# we accept an existing argspec...
spec = fn
args = inspect.formatargspec(*spec)
if spec[0]:
self_arg = spec[0][0]
elif spec[1]:
self_arg = '%s[0]' % spec[1]
else:
self_arg = None
if compat.py3k:
apply_pos = inspect.formatargspec(spec[0], spec[1],
spec[2], None, spec[4])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
if spec[4]:
num_defaults += len(spec[4])
name_args = spec[0] + spec[4]
else:
apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2])
num_defaults = 0
if spec[3]:
num_defaults += len(spec[3])
name_args = spec[0]
if num_defaults:
defaulted_vals = name_args[0 - num_defaults:]
else:
defaulted_vals = ()
apply_kw = inspect.formatargspec(name_args, spec[1], spec[2],
defaulted_vals,
formatvalue=lambda x: '=' + x)
if grouped:
return dict(args=args, self_arg=self_arg,
apply_pos=apply_pos, apply_kw=apply_kw)
else:
return dict(args=args[1:-1], self_arg=self_arg,
apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1])
def format_argspec_init(method, grouped=True):
"""format_argspec_plus with considerations for typical __init__ methods
Wraps format_argspec_plus with error handling strategies for typical
__init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
args = (grouped and '(self, *args, **kwargs)'
or 'self, *args, **kwargs')
return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args)
def getargspec_init(method):
"""inspect.getargspec with considerations for typical __init__ methods
Wraps inspect.getargspec with error handling for typical __init__ cases::
object.__init__ -> (self)
other unreflectable (usually C) -> (self, *args, **kwargs)
"""
try:
return inspect.getargspec(method)
except TypeError:
if method is object.__init__:
return (['self'], None, None, None)
else:
return (['self'], 'args', 'kwargs', None)
def unbound_method_to_callable(func_or_cls):
"""Adjust the incoming callable such that a 'self' argument is not
required.
"""
if isinstance(func_or_cls, types.MethodType) and not func_or_cls.__self__:
return func_or_cls.__func__
else:
return func_or_cls
def generic_repr(obj, additional_kw=(), to_inspect=None):
"""Produce a __repr__() based on direct association of the __init__()
specification vs. same-named attributes present.
"""
if to_inspect is None:
to_inspect = [obj]
else:
to_inspect = _collections.to_list(to_inspect)
missing = object()
pos_args = []
kw_args = _collections.OrderedDict()
vargs = None
for i, insp in enumerate(to_inspect):
try:
(_args, _vargs, vkw, defaults) = \
inspect.getargspec(insp.__init__)
except TypeError:
continue
else:
default_len = defaults and len(defaults) or 0
if i == 0:
if _vargs:
vargs = _vargs
if default_len:
pos_args.extend(_args[1:-default_len])
else:
pos_args.extend(_args[1:])
else:
kw_args.update([
(arg, missing) for arg in _args[1:-default_len]
])
if default_len:
kw_args.update([
(arg, default)
for arg, default
in zip(_args[-default_len:], defaults)
])
output = []
output.extend(repr(getattr(obj, arg, None)) for arg in pos_args)
if vargs is not None and hasattr(obj, vargs):
output.extend([repr(val) for val in getattr(obj, vargs)])
for arg, defval in kw_args.items():
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except:
pass
if additional_kw:
for arg, defval in additional_kw:
try:
val = getattr(obj, arg, missing)
if val is not missing and val != defval:
output.append('%s=%r' % (arg, val))
except:
pass
return "%s(%s)" % (obj.__class__.__name__, ", ".join(output))
class portable_instancemethod(object):
"""Turn an instancemethod into a (parent, name) pair
to produce a serializable callable.
"""
def __init__(self, meth):
self.target = meth.__self__
self.name = meth.__name__
def __call__(self, *arg, **kw):
return getattr(self.target, self.name)(*arg, **kw)
def class_hierarchy(cls):
"""Return an unordered sequence of all classes related to cls.
Traverses diamond hierarchies.
Fibs slightly: subclasses of builtin types are not returned. Thus
class_hierarchy(class A(object)) returns (A, object), not A plus every
class systemwide that derives from object.
Old-style classes are discarded and hierarchies rooted on them
will not be descended.
"""
if compat.py2k:
if isinstance(cls, types.ClassType):
return list()
hier = set([cls])
process = list(cls.__mro__)
while process:
c = process.pop()
if compat.py2k:
if isinstance(c, types.ClassType):
continue
bases = (_ for _ in c.__bases__
if _ not in hier and not isinstance(_, types.ClassType))
else:
bases = (_ for _ in c.__bases__ if _ not in hier)
for b in bases:
process.append(b)
hier.add(b)
if compat.py3k:
if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'):
continue
else:
if c.__module__ == '__builtin__' or not hasattr(
c, '__subclasses__'):
continue
for s in [_ for _ in c.__subclasses__() if _ not in hier]:
process.append(s)
hier.add(s)
return list(hier)
def iterate_attributes(cls):
"""iterate all the keys and attributes associated
with a class, without using getattr().
Does not use getattr() so that class-sensitive
descriptors (i.e. property.__get__()) are not called.
"""
keys = dir(cls)
for key in keys:
for c in cls.__mro__:
if key in c.__dict__:
yield (key, c.__dict__[key])
break
def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None,
name='self.proxy', from_instance=None):
"""Automates delegation of __specials__ for a proxying type."""
if only:
dunders = only
else:
if skip is None:
skip = ('__slots__', '__del__', '__getattribute__',
'__metaclass__', '__getstate__', '__setstate__')
dunders = [m for m in dir(from_cls)
if (m.startswith('__') and m.endswith('__') and
not hasattr(into_cls, m) and m not in skip)]
for method in dunders:
try:
fn = getattr(from_cls, method)
if not hasattr(fn, '__call__'):
continue
fn = getattr(fn, 'im_func', fn)
except AttributeError:
continue
try:
spec = inspect.getargspec(fn)
fn_args = inspect.formatargspec(spec[0])
d_args = inspect.formatargspec(spec[0][1:])
except TypeError:
fn_args = '(self, *args, **kw)'
d_args = '(*args, **kw)'
py = ("def %(method)s%(fn_args)s: "
"return %(name)s.%(method)s%(d_args)s" % locals())
env = from_instance is not None and {name: from_instance} or {}
compat.exec_(py, env)
try:
env[method].__defaults__ = fn.__defaults__
except AttributeError:
pass
setattr(into_cls, method, env[method])
def methods_equivalent(meth1, meth2):
"""Return True if the two methods are the same implementation."""
return getattr(meth1, '__func__', meth1) is getattr(
meth2, '__func__', meth2)
def as_interface(obj, cls=None, methods=None, required=None):
"""Ensure basic interface compliance for an instance or dict of callables.
Checks that ``obj`` implements public methods of ``cls`` or has members
listed in ``methods``. If ``required`` is not supplied, implementing at
least one interface method is sufficient. Methods present on ``obj`` that
are not in the interface are ignored.
If ``obj`` is a dict and ``dict`` does not meet the interface
requirements, the keys of the dictionary are inspected. Keys present in
``obj`` that are not in the interface will raise TypeErrors.
Raises TypeError if ``obj`` does not meet the interface criteria.
In all passing cases, an object with callable members is returned. In the
simple case, ``obj`` is returned as-is; if dict processing kicks in then
an anonymous class is returned.
obj
A type, instance, or dictionary of callables.
cls
Optional, a type. All public methods of cls are considered the
interface. An ``obj`` instance of cls will always pass, ignoring
``required``.
methods
Optional, a sequence of method names to consider as the interface.
required
Optional, a sequence of mandatory implementations. If omitted, an
``obj`` that provides at least one interface method is considered
sufficient. As a convenience, required may be a type, in which case
all public methods of the type are required.
"""
if not cls and not methods:
raise TypeError('a class or collection of method names are required')
if isinstance(cls, type) and isinstance(obj, cls):
return obj
interface = set(methods or [m for m in dir(cls) if not m.startswith('_')])
implemented = set(dir(obj))
complies = operator.ge
if isinstance(required, type):
required = interface
elif not required:
required = set()
complies = operator.gt
else:
required = set(required)
if complies(implemented.intersection(interface), required):
return obj
# No dict duck typing here.
if not isinstance(obj, dict):
qualifier = complies is operator.gt and 'any of' or 'all of'
raise TypeError("%r does not implement %s: %s" % (
obj, qualifier, ', '.join(interface)))
class AnonymousInterface(object):
"""A callable-holding shell."""
if cls:
AnonymousInterface.__name__ = 'Anonymous' + cls.__name__
found = set()
for method, impl in dictlike_iteritems(obj):
if method not in interface:
raise TypeError("%r: unknown in this interface" % method)
if not compat.callable(impl):
raise TypeError("%r=%r is not callable" % (method, impl))
setattr(AnonymousInterface, method, staticmethod(impl))
found.add(method)
if complies(found, required):
return AnonymousInterface
raise TypeError("dictionary does not contain required keys %s" %
', '.join(required - found))
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = self.fget(obj)
return result
def _reset(self, obj):
memoized_property.reset(obj, self.__name__)
@classmethod
def reset(cls, obj, name):
obj.__dict__.pop(name, None)
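# Usage sketch (illustrative): for a class defining
#     @memoized_property
#     def expensive(self): ...
# the first attribute access runs fget and stores the result in the instance
# __dict__, shadowing this non-data descriptor so later reads are plain dict
# lookups; memoized_property.reset(obj, 'expensive') forces recomputation.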
class memoized_instancemethod(object):
"""Decorate a method memoize its return value.
Best applied to no-arg methods: memoization is not sensitive to
argument values, and will always return the same value even when
called with different arguments.
"""
def __init__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
def __get__(self, obj, cls):
if obj is None:
return self
def oneshot(*args, **kw):
result = self.fget(obj, *args, **kw)
memo = lambda *a, **kw: result
memo.__name__ = self.__name__
memo.__doc__ = self.__doc__
obj.__dict__[self.__name__] = memo
return result
oneshot.__name__ = self.__name__
oneshot.__doc__ = self.__doc__
return oneshot
class group_expirable_memoized_property(object):
"""A family of @memoized_properties that can be expired in tandem."""
def __init__(self, attributes=()):
self.attributes = []
if attributes:
self.attributes.extend(attributes)
def expire_instance(self, instance):
"""Expire all memoized properties for *instance*."""
stash = instance.__dict__
for attribute in self.attributes:
stash.pop(attribute, None)
def __call__(self, fn):
self.attributes.append(fn.__name__)
return memoized_property(fn)
def method(self, fn):
self.attributes.append(fn.__name__)
return memoized_instancemethod(fn)
def dependency_for(modulename):
def decorate(obj):
# TODO: would be nice to improve on this import silliness,
# unfortunately importlib doesn't work that great either
tokens = modulename.split(".")
mod = compat.import_(
".".join(tokens[0:-1]), globals(), locals(), tokens[-1])
mod = getattr(mod, tokens[-1])
setattr(mod, obj.__name__, obj)
return obj
return decorate
class dependencies(object):
"""Apply imported dependencies as arguments to a function.
E.g.::
@util.dependencies(
"sqlalchemy.sql.widget",
"sqlalchemy.engine.default"
)
def some_func(self, widget, default, arg1, arg2, **kw):
# ...
Rationale is so that the impact of a dependency cycle can be
associated directly with the few functions that cause the cycle,
and not pollute the module-level namespace.
"""
def __init__(self, *deps):
self.import_deps = []
for dep in deps:
tokens = dep.split(".")
self.import_deps.append(
dependencies._importlater(
".".join(tokens[0:-1]),
tokens[-1]
)
)
def __call__(self, fn):
import_deps = self.import_deps
spec = compat.inspect_getfullargspec(fn)
spec_zero = list(spec[0])
hasself = spec_zero[0] in ('self', 'cls')
for i in range(len(import_deps)):
spec[0][i + (1 if hasself else 0)] = "import_deps[%r]" % i
inner_spec = format_argspec_plus(spec, grouped=False)
for impname in import_deps:
del spec_zero[1 if hasself else 0]
spec[0][:] = spec_zero
outer_spec = format_argspec_plus(spec, grouped=False)
code = 'lambda %(args)s: fn(%(apply_kw)s)' % {
"args": outer_spec['args'],
"apply_kw": inner_spec['apply_kw']
}
decorated = eval(code, locals())
decorated.__defaults__ = getattr(fn, 'im_func', fn).__defaults__
return update_wrapper(decorated, fn)
@classmethod
def resolve_all(cls, path):
for m in list(dependencies._unresolved):
if m._full_path.startswith(path):
m._resolve()
_unresolved = set()
_by_key = {}
class _importlater(object):
_unresolved = set()
_by_key = {}
def __new__(cls, path, addtl):
key = path + "." + addtl
if key in dependencies._by_key:
return dependencies._by_key[key]
else:
dependencies._by_key[key] = imp = object.__new__(cls)
return imp
def __init__(self, path, addtl):
self._il_path = path
self._il_addtl = addtl
dependencies._unresolved.add(self)
@property
def _full_path(self):
return self._il_path + "." + self._il_addtl
@memoized_property
def module(self):
if self in dependencies._unresolved:
raise ImportError(
"importlater.resolve_all() hasn't "
"been called (this is %s %s)"
% (self._il_path, self._il_addtl))
return getattr(self._initial_import, self._il_addtl)
def _resolve(self):
dependencies._unresolved.discard(self)
self._initial_import = compat.import_(
self._il_path, globals(), locals(),
[self._il_addtl])
def __getattr__(self, key):
if key == 'module':
raise ImportError("Could not resolve module %s"
% self._full_path)
try:
attr = getattr(self.module, key)
except AttributeError:
raise AttributeError(
"Module %s has no attribute '%s'" %
(self._full_path, key)
)
self.__dict__[key] = attr
return attr
# from paste.deploy.converters
def asbool(obj):
if isinstance(obj, compat.string_types):
obj = obj.strip().lower()
if obj in ['true', 'yes', 'on', 'y', 't', '1']:
return True
elif obj in ['false', 'no', 'off', 'n', 'f', '0']:
return False
else:
raise ValueError("String is not true/false: %r" % obj)
return bool(obj)
def bool_or_str(*text):
"""Return a callable that will evaulate a string as
boolean, or one of a set of "alternate" string values.
"""
def bool_or_value(obj):
if obj in text:
return obj
else:
return asbool(obj)
return bool_or_value
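# --- Added illustration (not part of the original module) -------------------
# A short sketch of how asbool()/bool_or_str() behave; ``_demo_bool_or_str``
# is an invented name used purely for demonstration.
def _demo_bool_or_str():
    assert asbool('Yes') is True          # truthy strings, case-insensitive
    assert asbool('0') is False
    debug = bool_or_str('debug')          # the literal 'debug' passes through
    assert debug('debug') == 'debug'
    assert debug('true') is True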
def asint(value):
"""Coerce to integer."""
if value is None:
return value
return int(value)
def coerce_kw_type(kw, key, type_, flexi_bool=True):
"""If 'key' is present in dict 'kw', coerce its value to type 'type\_' if
necessary. If 'flexi_bool' is True, the string '0' is considered false
when coercing to boolean.
"""
if key in kw and not isinstance(kw[key], type_) and kw[key] is not None:
if type_ is bool and flexi_bool:
kw[key] = asbool(kw[key])
else:
kw[key] = type_(kw[key])
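# --- Added illustration (not part of the original module) -------------------
# coerce_kw_type() mutates the kw dict in place; ``_demo_coerce_kw_type`` is
# an invented name for demonstration.
def _demo_coerce_kw_type():
    kw = {'echo': '0', 'pool_size': '5'}
    coerce_kw_type(kw, 'echo', bool)      # flexi_bool: the string '0' -> False
    coerce_kw_type(kw, 'pool_size', int)  # '5' -> 5
    assert kw == {'echo': False, 'pool_size': 5}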
def constructor_copy(obj, cls, **kw):
"""Instantiate cls using the __dict__ of obj as constructor arguments.
Uses inspect to match the named arguments of ``cls``.
"""
names = get_cls_kwargs(cls)
kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__)
return cls(**kw)
def counter():
"""Return a threadsafe counter function."""
lock = compat.threading.Lock()
counter = itertools.count(1)
# avoid the 2to3 "next" transformation...
def _next():
lock.acquire()
try:
return next(counter)
finally:
lock.release()
return _next
def duck_type_collection(specimen, default=None):
"""Given an instance or class, guess if it is or is acting as one of
the basic collection types: list, set and dict. If the __emulates__
property is present, return that preferentially.
"""
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: the builtin set
if (specimen.__emulates__ is not None and
issubclass(specimen.__emulates__, set)):
return set
else:
return specimen.__emulates__
isa = isinstance(specimen, type) and issubclass or isinstance
if isa(specimen, list):
return list
elif isa(specimen, set):
return set
elif isa(specimen, dict):
return dict
if hasattr(specimen, 'append'):
return list
elif hasattr(specimen, 'add'):
return set
elif hasattr(specimen, 'set'):
return dict
else:
return default
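# --- Added illustration (not part of the original module) -------------------
# duck_type_collection() guesses list/set/dict from the interface; the names
# below are invented for demonstration.
def _demo_duck_type_collection():
    assert duck_type_collection([]) is list
    assert duck_type_collection({}) is dict
    class SetLike(object):
        def add(self, item):
            pass
    assert duck_type_collection(SetLike()) is set   # matched via 'add'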
def assert_arg_type(arg, argtype, name):
if isinstance(arg, argtype):
return arg
else:
if isinstance(argtype, tuple):
raise exc.ArgumentError(
"Argument '%s' is expected to be one of type %s, got '%s'" %
(name, ' or '.join("'%s'" % a for a in argtype), type(arg)))
else:
raise exc.ArgumentError(
"Argument '%s' is expected to be of type '%s', got '%s'" %
(name, argtype, type(arg)))
def dictlike_iteritems(dictlike):
"""Return a (key, value) iterator for almost any dict-like object."""
if compat.py3k:
if hasattr(dictlike, 'items'):
return list(dictlike.items())
else:
if hasattr(dictlike, 'iteritems'):
return dictlike.iteritems()
elif hasattr(dictlike, 'items'):
return iter(dictlike.items())
getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None))
if getter is None:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
if hasattr(dictlike, 'iterkeys'):
def iterator():
for key in dictlike.iterkeys():
yield key, getter(key)
return iterator()
elif hasattr(dictlike, 'keys'):
return iter((key, getter(key)) for key in dictlike.keys())
else:
raise TypeError(
"Object '%r' is not dict-like" % dictlike)
class classproperty(property):
"""A decorator that behaves like @property except that operates
on classes rather than instances.
The decorator is currently special when using the declarative
module, but note that the
:class:`~.sqlalchemy.ext.declarative.declared_attr`
decorator should be used for this purpose with declarative.
"""
def __init__(self, fget, *arg, **kw):
super(classproperty, self).__init__(fget, *arg, **kw)
self.__doc__ = fget.__doc__
def __get__(desc, self, cls):
return desc.fget(cls)
class hybridmethod(object):
"""Decorate a function as cls- or instance- level."""
def __init__(self, func, expr=None):
self.func = func
def __get__(self, instance, owner):
if instance is None:
return self.func.__get__(owner, owner.__class__)
else:
return self.func.__get__(instance, owner)
class _symbol(int):
def __new__(self, name, doc=None, canonical=None):
"""Construct a new named symbol."""
assert isinstance(name, compat.string_types)
if canonical is None:
canonical = hash(name)
v = int.__new__(_symbol, canonical)
v.name = name
if doc:
v.__doc__ = doc
return v
def __reduce__(self):
return symbol, (self.name, "x", int(self))
def __str__(self):
return repr(self)
def __repr__(self):
return "symbol(%r)" % self.name
_symbol.__name__ = 'symbol'
class symbol(object):
"""A constant symbol.
>>> symbol('foo') is symbol('foo')
True
>>> symbol('foo')
    symbol('foo')
A slight refinement of the MAGICCOOKIE=object() pattern. The primary
advantage of symbol() is its repr(). They are also singletons.
Repeated calls of symbol('name') will all return the same instance.
The optional ``doc`` argument assigns to ``__doc__``. This
is strictly so that Sphinx autoattr picks up the docstring we want
(it doesn't appear to pick up the in-module docstring if the datamember
is in a different module - autoattribute also blows up completely).
If Sphinx fixes/improves this then we would no longer need
``doc`` here.
"""
symbols = {}
_lock = compat.threading.Lock()
def __new__(cls, name, doc=None, canonical=None):
cls._lock.acquire()
try:
sym = cls.symbols.get(name)
if sym is None:
cls.symbols[name] = sym = _symbol(name, doc, canonical)
return sym
finally:
symbol._lock.release()
_creation_order = 1
def set_creation_order(instance):
"""Assign a '_creation_order' sequence to the given instance.
This allows multiple instances to be sorted in order of creation
(typically within a single thread; the counter is not particularly
threadsafe).
"""
global _creation_order
instance._creation_order = _creation_order
_creation_order += 1
def warn_exception(func, *args, **kwargs):
"""executes the given function, catches all exceptions and converts to
a warning.
"""
try:
return func(*args, **kwargs)
except:
warn("%s('%s') ignored" % sys.exc_info()[0:2])
def warn(msg, stacklevel=3):
"""Issue a warning.
If msg is a string, :class:`.exc.SAWarning` is used as
the category.
.. note::
This function is swapped out when the test suite
runs, with a compatible version that uses
warnings.warn_explicit, so that the warnings registry can
be controlled.
"""
if isinstance(msg, compat.string_types):
warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel)
else:
warnings.warn(msg, stacklevel=stacklevel)
def only_once(fn):
"""Decorate the given function to be a no-op after it is called exactly
once."""
once = [fn]
def go(*arg, **kw):
if once:
once_fn = once.pop()
return once_fn(*arg, **kw)
return go
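# --- Added illustration (not part of the original module) -------------------
# only_once() turns later calls into no-ops returning None; the names below
# are invented for demonstration.
def _demo_only_once():
    state = []
    @only_once
    def initialize():
        state.append('ran')
        return 'done'
    assert initialize() == 'done'
    assert initialize() is None      # second call is a no-op
    assert state == ['ran']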
_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py')
_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)')
def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE):
"""Chop extraneous lines off beginning and end of a traceback.
:param tb:
a list of traceback lines as returned by ``traceback.format_stack()``
:param exclude_prefix:
a regular expression object matching lines to skip at beginning of
``tb``
:param exclude_suffix:
a regular expression object matching lines to skip at end of ``tb``
"""
start = 0
end = len(tb) - 1
while start <= end and exclude_prefix.search(tb[start]):
start += 1
while start <= end and exclude_suffix.search(tb[end]):
end -= 1
return tb[start:end + 1]
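# --- Added illustration (not part of the original module) -------------------
# chop_traceback() with its default regexes trims unittest frames from the
# head and sqlalchemy frames from the tail; the sample lines are invented.
def _demo_chop_traceback():
    tb = [
        '  File "unittest/case.py", line 1',
        '  File "myapp/views.py", line 2',
        '  File "sqlalchemy/engine/base.py", line 3',
    ]
    assert chop_traceback(tb) == ['  File "myapp/views.py", line 2']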
NoneType = type(None)
| apache-2.0 |
minhphung171093/OpenERP_V7 | openerp/addons/account/wizard/account_report_common_partner.py | 56 | 2031 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_partner_report(osv.osv_memory):
_name = 'account.common.partner.report'
_description = 'Account Common Partner Report'
_inherit = "account.common.report"
_columns = {
'result_selection': fields.selection([('customer','Receivable Accounts'),
('supplier','Payable Accounts'),
('customer_supplier','Receivable and Payable Accounts')],
"Partner's", required=True),
}
_defaults = {
'result_selection': 'customer',
}
def pre_print_report(self, cr, uid, ids, data, context=None):
if context is None:
context = {}
data['form'].update(self.read(cr, uid, ids, ['result_selection'], context=context)[0])
return data
account_common_partner_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sein-tao/pyBioUtil | BioUtil/vcf.py | 1 | 3924 | #!/usr/bin/env python3
"""
This module serves as a wrapper for the vcf module [https://github.com/jamescasbon/PyVCF]
open() and the vcfFile class are for direct construction of a vcf file object;
vcfReader and vcfWriter are wrappers for vcf.Reader and vcf.Writer
which accept a file name as the first argument (rather than an fsock),
and can also accept a vcf file without a header (by additionally providing the header information)
This file is under the GPLv2 License
Sein Tao <sein.tao@gmail.com>, 2015
"""
import vcf
import csv
import types
import os
import builtins
from .xz import xzopen
from itertools import chain
from copy import copy, deepcopy
from .decorator import context_decorator
# import log
# log.basicConfig(level=log.DEBUG)
__all__ = ['vcfFile', 'open', 'vcfWriter', 'vcfReader']
_vcf = vcf
_Reader = vcf.Reader
_Writer = vcf.Writer
Record=vcf.model._Record
Call=vcf.model._Call
class vcfFile(object):
def __new__(cls, file, mode='r', template=None, prepend_chr=False):
if mode == 'r':
# add filename to avoid the **stupid** compression detection on stream from vcf module
obj = vcf.Reader(xzopen(file, mode), prepend_chr=prepend_chr, filename=file)
obj.close = types.MethodType(lambda self: self._reader.close(), obj)
return obj
elif mode == 'w':
obj = vcf.Writer(xzopen(file, mode), template)
obj.write = obj.write_record
return obj
else:
raise ValueError("Unkonw mode: " + mode)
def _read_header(self, header, template):
    """Resolve the shared header/template arguments for vcfReader and vcfWriter."""
    if header and template:
        raise ValueError("only one of header and template should be specified")
    if header:
        header_fh = xzopen(header, 'r')
        template = vcf.Reader(header_fh)
        header_fh.close()
    return template
@context_decorator
class vcfReader(vcf.Reader):
def __init__(self, file=None, header=None, fsock=None,
template=None, **kwargs):
if fsock:
self._fsock = fsock
elif file:
self._fsock = xzopen(file, 'r')
template = _read_header(self, header, template)
sup = super(self.__class__, self)
if template:
self._parse_metainfo = types.MethodType(lambda self: None, self)
sup.__init__(self._fsock, filename=file, **kwargs)
for attr in ('metadata',
'infos', 'filters', 'alts', 'contigs', 'formats',
'_column_headers', 'samples', '_sample_indexes'):
setattr(self, attr, deepcopy(getattr(template, attr)) )
else:
sup.__init__(self._fsock, filename=file, **kwargs)
def close(self):
try:
self._fsock.close()
except AttributeError:
pass
@context_decorator
class vcfWriter(vcf.Writer):
def __init__(self, file=None, header=None, fsock=None,
template=None, write_header = True, **kwargs):
template = _read_header(self, header, template)
if fsock:
self._fsock = fsock
elif file:
self._fsock = xzopen(file, 'w')
sup = super(self.__class__, self)
if write_header is False:
null = builtins.open(os.devnull, 'w')
sup.__init__(null, template, **kwargs)
null.close()
self.stream = self._fsock
self.writer = csv.writer(self.stream, delimiter="\t",
lineterminator=kwargs.get("lineterminator","\n"))
else:
sup.__init__(self._fsock, template, **kwargs)
self.write = self.write_record
def open(file, mode='r', header = None, **kwargs):
"wrapper for vcfReader and vcfWriter"
#D# log.debug([mode, kwargs])
if 'r' in mode:
return vcfReader(file, header=header, **kwargs)
elif 'w' in mode:
return vcfWriter(file, header=header, **kwargs)
else:
raise ValueError("unknown mode: %s" % mode)
| gpl-2.0 |
mjirayu/sit_academy | common/lib/chem/chem/chemtools.py | 250 | 10721 | """This module originally includes functions for grading VSEPR problems.
Also, this module may be the place for other chemistry-related grading functions. TODO: discuss it.
"""
import json
import unittest
import itertools
def vsepr_parse_user_answer(user_input):
"""
user_input is json generated by vsepr.js from dictionary.
There are must be only two keys in original user_input dictionary: "geometry" and "atoms".
Format: u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}'
Order of elements inside "atoms" subdict does not matters.
Return dict from parsed json.
"Atoms" subdict stores positions of atoms in molecule.
General types of positions:
c0 - central atom
p0..pN - peripheral atoms
a0..aN - axial atoms
e0..eN - equatorial atoms
Each position is dictionary key, i.e. user_input["atoms"]["c0"] is central atom, user_input["atoms"]["a0"] is one of axial atoms.
Special position only for AX6 (Octahedral) geometry:
e10, e12 - atom pairs opposite the central atom,
e20, e22 - atom pairs opposite the central atom,
e1 and e2 pairs lying crosswise in equatorial plane.
In user_input["atoms"] may be only 3 set of keys:
(c0,p0..pN),
(c0, a0..aN, e0..eN),
(c0, a0, a1, e10,e11,e20,e21) - if geometry is AX6.
"""
return json.loads(user_input)
def vsepr_build_correct_answer(geometry, atoms):
"""
geometry is string.
atoms is dict of atoms with proper positions.
Example:
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
returns a dictionary composed from input values:
{'geometry': geometry, 'atoms': atoms}
"""
return {'geometry': geometry, 'atoms': atoms}
def vsepr_grade(user_input, correct_answer, convert_to_peripheral=False):
"""
    This function compares user_input against correct_answer.
    The comparison is successful if all of the following steps succeed:
    1) geometries are equal
    2) central atoms (index 'c0' in the dictionary) are equal
    3) the corresponding subsets of atom positions are equal:
    In the next steps, the corresponding subsets of atom positions are compared: equatorial (e0..eN), axial (a0..aN) or peripheral (p0..pN)
If convert_to_peripheral is True, then axial and equatorial positions are converted to peripheral.
This means that user_input from:
"atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}' after parsing to json
is converted to:
{"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"}
i.e. aX and eX -> pX
So if converted, p subsets are compared,
if not a and e subsets are compared
If all subsets are equal, grade succeeds.
There is also one special case for AX6 geometry.
In this case user_input["atoms"] contains special 3 symbol keys: e10, e12, e20, and e21.
Correct answer for this geometry can be of 3 types:
1) c0 and peripheral
2) c0 and axial and equatorial
3) c0 and axial and equatorial-subset-1 (e1X) and equatorial-subset-2 (e2X)
If correct answer is type 1 or 2, then user_input is converted from type 3 to type 2 (or to type 1 if convert_to_peripheral is True)
    If correct_answer is type 3, then we do a special-case comparison. We have 3 sets of atom positions both in user_input and correct_answer: axial, eq-1 and eq-2.
    The answer will be correct if these sets are equal for one of the permutations. For example, if:
user_axial = correct_eq-1
user_eq-1 = correct-axial
user_eq-2 = correct-eq-2
"""
if user_input['geometry'] != correct_answer['geometry']:
return False
if user_input['atoms']['c0'] != correct_answer['atoms']['c0']:
return False
if convert_to_peripheral:
# convert user_input from (a,e,e1,e2) to (p)
# correct_answer must be set in (p) using this flag
c0 = user_input['atoms'].pop('c0')
user_input['atoms'] = {'p' + str(i): v for i, v in enumerate(user_input['atoms'].values())}
user_input['atoms']['c0'] = c0
# special case for AX6
if 'e10' in correct_answer['atoms']: # need check e1x, e2x symmetry for AX6..
a_user = {}
a_correct = {}
for ea_position in ['a', 'e1', 'e2']: # collecting positions:
a_user[ea_position] = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct[ea_position] = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
correct = [sorted(a_correct['a'])] + [sorted(a_correct['e1'])] + [sorted(a_correct['e2'])]
for permutation in itertools.permutations(['a', 'e1', 'e2']):
if correct == [sorted(a_user[permutation[0]])] + [sorted(a_user[permutation[1]])] + [sorted(a_user[permutation[2]])]:
return True
return False
else: # no need to check e1x,e2x symmetry - convert them to ex
if 'e10' in user_input['atoms']: # e1x exists, it is AX6.. case
e_index = 0
for k, v in user_input['atoms'].items():
if len(k) == 3: # e1x
del user_input['atoms'][k]
user_input['atoms']['e' + str(e_index)] = v
e_index += 1
# common case
for ea_position in ['p', 'a', 'e']:
# collecting atoms:
a_user = [v for k, v in user_input['atoms'].items() if k.startswith(ea_position)]
a_correct = [v for k, v in correct_answer['atoms'].items() if k.startswith(ea_position)]
# print a_user, a_correct
if len(a_user) != len(a_correct):
return False
if sorted(a_user) != sorted(a_correct):
return False
return True
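# --- Added usage sketch (not part of the original module) -------------------
# A full round trip through the helpers above: build the correct answer,
# parse a matching user submission, and grade it.
def _demo_vsepr_grade():
    correct = vsepr_build_correct_answer(
        geometry="AX3E0",
        atoms={"c0": "B", "p0": "F", "p1": "F", "p2": "F"})
    user = vsepr_parse_user_answer(
        u'{"geometry": "AX3E0", "atoms": {"c0": "B", "p0": "F", "p1": "F", "p2": "F"}}')
    assert vsepr_grade(user, correct)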
class Test_Grade(unittest.TestCase):
''' test grade function '''
def test_incorrect_geometry(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX3E0","atoms":{"c0": "B","p0": "F","p1": "B","p2": "F"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX4E0", atoms={"c0": "N", "p0": "H", "p1": "(ep)", "p2": "H", "p3": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX4E0","atoms":{"c0": "N","p0": "H","p1": "(ep)","p2": "H", "p3": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "test", "a1": "(ep)", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_ae_convert_to_p_but_input_not_in_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "(ep)", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_ae_convert_to_p(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "p0": "(ep)", "p1": "test", "p2": "H", "p3": "H", "p4": "(ep)", "p6": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "test","a1": "(ep)","e10": "H","e11": "(ep)","e20": "H","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer, convert_to_peripheral=True))
def test_correct_answer_e1e2_in_a(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "(ep)","a1": "(ep)","e10": "H","e11": "H","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e1(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "(ep)","e20": "H","e21": "H"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_correct_answer_e1e2_in_e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "H","e11": "H","e20": "(ep)","e21": "(ep)"}}')
self.assertTrue(vsepr_grade(user_answer, correct_answer))
def test_incorrect_answer_e1e2(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "(ep)", "e10": "H", "e11": "H", "e20": "H", "e21": "H"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "Br","a0": "H","a1": "H","e10": "(ep)","e11": "H","e20": "H","e21": "(ep)"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def test_incorrect_c0(self):
correct_answer = vsepr_build_correct_answer(geometry="AX6E0", atoms={"c0": "Br", "a0": "(ep)", "a1": "test", "e0": "H", "e1": "H", "e2": "H", "e3": "(ep)"})
user_answer = vsepr_parse_user_answer(u'{"geometry": "AX6E0","atoms":{"c0": "H","a0": "test","a1": "(ep)","e0": "H","e1": "H","e2": "(ep)","e3": "H"}}')
self.assertFalse(vsepr_grade(user_answer, correct_answer))
def suite():
testcases = [Test_Grade]
suites = []
for testcase in testcases:
suites.append(unittest.TestLoader().loadTestsFromTestCase(testcase))
return unittest.TestSuite(suites)
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(suite())
| agpl-3.0 |
bakhtout/odoo-educ | addons/account_test/report/account_test_report.py | 156 | 3740 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import datetime
import time
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.report import report_sxw
#
# Use period and Journal for selection or resources
#
class report_assert_account(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(report_assert_account, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'datetime': datetime,
'execute_code': self.execute_code,
})
def execute_code(self, code_exec):
def reconciled_inv():
"""
returns the list of invoices that are set as reconciled = True
"""
return self.pool.get('account.invoice').search(self.cr, self.uid, [('reconciled','=',True)])
def order_columns(item, cols=None):
"""
This function is used to display a dictionary as a string, with its columns in the order chosen.
:param item: dict
:param cols: list of field names
:returns: a list of tuples (fieldname: value) in a similar way that would dict.items() do except that the
returned values are following the order given by cols
:rtype: [(key, value)]
"""
if cols is None:
cols = item.keys()
return [(col, item.get(col)) for col in cols if col in item.keys()]
localdict = {
'cr': self.cr,
'uid': self.uid,
'reconciled_inv': reconciled_inv, #specific function used in different tests
'result': None, #used to store the result of the test
'column_order': None, #used to choose the display order of columns (in case you are returning a list of dict)
}
exec code_exec in localdict
result = localdict['result']
column_order = localdict.get('column_order', None)
if not isinstance(result, (tuple, list, set)):
result = [result]
if not result:
result = [_('The test was passed successfully')]
else:
def _format(item):
if isinstance(item, dict):
return ', '.join(["%s: %s" % (tup[0], tup[1]) for tup in order_columns(item, column_order)])
else:
return item
result = [_(_format(rec)) for rec in result]
return result
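# --- Added illustration (not part of the original module) -------------------
# The exec-with-namespace pattern used by execute_code() above: the snippet
# assigns to ``result`` inside localdict and the caller reads it back.
def _demo_execute_code_pattern():
    localdict = {'result': None}
    exec "result = [x for x in (1, 2, 3) if x > 1]" in localdict
    assert localdict['result'] == [2, 3]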
class report_accounttest(osv.AbstractModel):
_name = 'report.account_test.report_accounttest'
_inherit = 'report.abstract_report'
_template = 'account_test.report_accounttest'
_wrapped_report_class = report_assert_account
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
davidharrigan/django | tests/postgres_tests/test_ranges.py | 98 | 24582 | import datetime
import json
import unittest
from django import forms
from django.core import exceptions, serializers
from django.db import connection
from django.db.models import F
from django.test import TestCase, override_settings
from django.utils import timezone
from . import PostgreSQLTestCase
from .models import RangeLookupsModel, RangesModel
try:
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django.contrib.postgres import fields as pg_fields, forms as pg_forms
from django.contrib.postgres.validators import (
RangeMaxValueValidator, RangeMinValueValidator,
)
except ImportError:
pass
def skipUnlessPG92(test):
try:
PG_VERSION = connection.pg_version
except AttributeError:
PG_VERSION = 0
if PG_VERSION < 90200:
return unittest.skip('PostgreSQL >= 9.2 required')(test)
return test
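# --- Added illustration (not part of the original test suite) ----------------
# A minimal demonstration of the psycopg2 range objects exercised below,
# assuming psycopg2 is importable (as guarded by the try/except above);
# ``_demo_numeric_range`` is an invented name.
def _demo_numeric_range():
    r = NumericRange(0, 10)                  # default bounds '[)': half-open
    assert 5 in r
    assert 10 not in r                       # upper bound excluded
    assert 10 in NumericRange(0, 10, '[]')   # '[]' includes the upper bound
    assert NumericRange(empty=True).isempty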
@skipUnlessPG92
class TestSaveLoad(TestCase):
def test_all_fields(self):
now = timezone.now()
instance = RangesModel(
ints=NumericRange(0, 10),
bigints=NumericRange(10, 20),
floats=NumericRange(20, 30),
timestamps=DateTimeTZRange(now - datetime.timedelta(hours=1), now),
dates=DateRange(now.date() - datetime.timedelta(days=1), now.date()),
)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(instance.ints, loaded.ints)
self.assertEqual(instance.bigints, loaded.bigints)
self.assertEqual(instance.floats, loaded.floats)
self.assertEqual(instance.timestamps, loaded.timestamps)
self.assertEqual(instance.dates, loaded.dates)
def test_range_object(self):
r = NumericRange(0, 10)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_tuple(self):
instance = RangesModel(ints=(0, 10))
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(NumericRange(0, 10), loaded.ints)
def test_range_object_boundaries(self):
r = NumericRange(0, 10, '[]')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
self.assertTrue(10 in loaded.floats)
def test_unbounded(self):
r = NumericRange(None, None, '()')
instance = RangesModel(floats=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.floats)
def test_empty(self):
r = NumericRange(empty=True)
instance = RangesModel(ints=r)
instance.save()
loaded = RangesModel.objects.get()
self.assertEqual(r, loaded.ints)
def test_null(self):
instance = RangesModel(ints=None)
instance.save()
loaded = RangesModel.objects.get()
self.assertIsNone(loaded.ints)
@skipUnlessPG92
class TestQuerying(TestCase):
@classmethod
def setUpTestData(cls):
cls.objs = [
RangesModel.objects.create(ints=NumericRange(0, 10)),
RangesModel.objects.create(ints=NumericRange(5, 15)),
RangesModel.objects.create(ints=NumericRange(None, 0)),
RangesModel.objects.create(ints=NumericRange(empty=True)),
RangesModel.objects.create(ints=None),
]
def test_exact(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__exact=NumericRange(0, 10)),
[self.objs[0]],
)
def test_isnull(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isnull=True),
[self.objs[4]],
)
def test_isempty(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__isempty=True),
[self.objs[3]],
)
def test_contains(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=8),
[self.objs[0], self.objs[1]],
)
def test_contains_range(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contains=NumericRange(3, 8)),
[self.objs[0]],
)
def test_contained_by(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__contained_by=NumericRange(0, 20)),
[self.objs[0], self.objs[1], self.objs[3]],
)
def test_overlap(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__overlap=NumericRange(3, 8)),
[self.objs[0], self.objs[1]],
)
def test_fully_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_lt=NumericRange(5, 10)),
[self.objs[2]],
)
def test_fully_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__fully_gt=NumericRange(5, 10)),
[],
)
def test_not_lt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_lt=NumericRange(5, 10)),
[self.objs[1]],
)
def test_not_gt(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__not_gt=NumericRange(5, 10)),
[self.objs[0], self.objs[2]],
)
def test_adjacent_to(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__adjacent_to=NumericRange(0, 5)),
[self.objs[1], self.objs[2]],
)
def test_startswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith=0),
[self.objs[0]],
)
def test_endswith(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__endswith=0),
[self.objs[2]],
)
def test_startswith_chaining(self):
self.assertSequenceEqual(
RangesModel.objects.filter(ints__startswith__gte=0),
[self.objs[0], self.objs[1]],
)
@skipUnlessPG92
class TestQueryingWithRanges(TestCase):
def test_date_range(self):
objs = [
RangeLookupsModel.objects.create(date='2015-01-01'),
RangeLookupsModel.objects.create(date='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_date_range_datetime_field(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01'),
RangeLookupsModel.objects.create(timestamp='2015-05-05'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(timestamp__date__contained_by=DateRange('2015-01-01', '2015-05-04')),
[objs[0]],
)
def test_datetime_range(self):
objs = [
RangeLookupsModel.objects.create(timestamp='2015-01-01T09:00:00'),
RangeLookupsModel.objects.create(timestamp='2015-05-05T17:00:00'),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(
timestamp__contained_by=DateTimeTZRange('2015-01-01T09:00', '2015-05-04T23:55')
),
[objs[0]],
)
def test_integer_range(self):
objs = [
RangeLookupsModel.objects.create(integer=5),
RangeLookupsModel.objects.create(integer=99),
RangeLookupsModel.objects.create(integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_biginteger_range(self):
objs = [
RangeLookupsModel.objects.create(big_integer=5),
RangeLookupsModel.objects.create(big_integer=99),
RangeLookupsModel.objects.create(big_integer=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(big_integer__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_float_range(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=NumericRange(1, 98)),
[objs[0]]
)
def test_f_ranges(self):
parent = RangesModel.objects.create(floats=NumericRange(0, 10))
objs = [
RangeLookupsModel.objects.create(float=5, parent=parent),
RangeLookupsModel.objects.create(float=99, parent=parent),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.filter(float__contained_by=F('parent__floats')),
[objs[0]]
)
def test_exclude(self):
objs = [
RangeLookupsModel.objects.create(float=5),
RangeLookupsModel.objects.create(float=99),
RangeLookupsModel.objects.create(float=-1),
]
self.assertSequenceEqual(
RangeLookupsModel.objects.exclude(float__contained_by=NumericRange(0, 100)),
[objs[2]]
)
@skipUnlessPG92
class TestSerialization(TestCase):
test_data = (
'[{"fields": {"ints": "{\\"upper\\": \\"10\\", \\"lower\\": \\"0\\", '
'\\"bounds\\": \\"[)\\"}", "floats": "{\\"empty\\": true}", '
'"bigints": null, "timestamps": "{\\"upper\\": \\"2014-02-02T12:12:12+00:00\\", '
'\\"lower\\": \\"2014-01-01T00:00:00+00:00\\", \\"bounds\\": \\"[)\\"}", '
'"dates": "{\\"upper\\": \\"2014-02-02\\", \\"lower\\": \\"2014-01-01\\", \\"bounds\\": \\"[)\\"}" }, '
'"model": "postgres_tests.rangesmodel", "pk": null}]'
)
lower_date = datetime.date(2014, 1, 1)
upper_date = datetime.date(2014, 2, 2)
lower_dt = datetime.datetime(2014, 1, 1, 0, 0, 0, tzinfo=timezone.utc)
upper_dt = datetime.datetime(2014, 2, 2, 12, 12, 12, tzinfo=timezone.utc)
def test_dumping(self):
instance = RangesModel(ints=NumericRange(0, 10), floats=NumericRange(empty=True),
timestamps=DateTimeTZRange(self.lower_dt, self.upper_dt),
dates=DateRange(self.lower_date, self.upper_date))
data = serializers.serialize('json', [instance])
dumped = json.loads(data)
for field in ('ints', 'dates', 'timestamps'):
dumped[0]['fields'][field] = json.loads(dumped[0]['fields'][field])
check = json.loads(self.test_data)
for field in ('ints', 'dates', 'timestamps'):
check[0]['fields'][field] = json.loads(check[0]['fields'][field])
self.assertEqual(dumped, check)
def test_loading(self):
instance = list(serializers.deserialize('json', self.test_data))[0].object
self.assertEqual(instance.ints, NumericRange(0, 10))
self.assertEqual(instance.floats, NumericRange(empty=True))
self.assertEqual(instance.bigints, None)
class TestValidators(PostgreSQLTestCase):
def test_max(self):
validator = RangeMaxValueValidator(5)
validator(NumericRange(0, 5))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely less than or equal to 5.')
self.assertEqual(cm.exception.code, 'max_value')
def test_min(self):
validator = RangeMinValueValidator(5)
validator(NumericRange(10, 15))
with self.assertRaises(exceptions.ValidationError) as cm:
validator(NumericRange(0, 10))
self.assertEqual(cm.exception.messages[0], 'Ensure that this range is completely greater than or equal to 5.')
self.assertEqual(cm.exception.code, 'min_value')
class TestFormField(PostgreSQLTestCase):
def test_valid_integer(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['1', '2'])
self.assertEqual(value, NumericRange(1, 2))
def test_valid_floats(self):
field = pg_forms.FloatRangeField()
value = field.clean(['1.12345', '2.001'])
self.assertEqual(value, NumericRange(1.12345, 2.001))
def test_valid_timestamps(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['01/01/2014 00:00:00', '02/02/2014 12:12:12'])
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(value, DateTimeTZRange(lower, upper))
def test_valid_dates(self):
field = pg_forms.DateRangeField()
value = field.clean(['01/01/2014', '02/02/2014'])
lower = datetime.date(2014, 1, 1)
upper = datetime.date(2014, 2, 2)
self.assertEqual(value, DateRange(lower, upper))
def test_using_split_datetime_widget(self):
class SplitDateTimeRangeField(pg_forms.DateTimeRangeField):
base_field = forms.SplitDateTimeField
class SplitForm(forms.Form):
field = SplitDateTimeRangeField()
form = SplitForm()
self.assertHTMLEqual(str(form), '''
<tr>
<th>
<label for="id_field_0">Field:</label>
</th>
<td>
<input id="id_field_0_0" name="field_0_0" type="text" />
<input id="id_field_0_1" name="field_0_1" type="text" />
<input id="id_field_1_0" name="field_1_0" type="text" />
<input id="id_field_1_1" name="field_1_1" type="text" />
</td>
</tr>
''')
form = SplitForm({
'field_0_0': '01/01/2014',
'field_0_1': '00:00:00',
'field_1_0': '02/02/2014',
'field_1_1': '12:12:12',
})
self.assertTrue(form.is_valid())
lower = datetime.datetime(2014, 1, 1, 0, 0, 0)
upper = datetime.datetime(2014, 2, 2, 12, 12, 12)
self.assertEqual(form.cleaned_data['field'], DateTimeTZRange(lower, upper))
def test_none(self):
field = pg_forms.IntegerRangeField(required=False)
value = field.clean(['', ''])
self.assertEqual(value, None)
def test_rendering(self):
class RangeForm(forms.Form):
ints = pg_forms.IntegerRangeField()
self.assertHTMLEqual(str(RangeForm()), '''
<tr>
<th><label for="id_ints_0">Ints:</label></th>
<td>
<input id="id_ints_0" name="ints_0" type="number" />
<input id="id_ints_1" name="ints_1" type="number" />
</td>
</tr>
''')
def test_integer_lower_bound_higher(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['10', '2'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_integer_open(self):
field = pg_forms.IntegerRangeField()
value = field.clean(['', '0'])
self.assertEqual(value, NumericRange(None, 0))
def test_integer_incorrect_data_type(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two whole numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_integer_invalid_lower(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_invalid_upper(self):
field = pg_forms.IntegerRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a whole number.')
def test_integer_required(self):
field = pg_forms.IntegerRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean([1, ''])
self.assertEqual(value, NumericRange(1, None))
def test_float_lower_bound_higher(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.8', '1.6'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_float_open(self):
field = pg_forms.FloatRangeField()
value = field.clean(['', '3.1415926'])
self.assertEqual(value, NumericRange(None, 3.1415926))
def test_float_incorrect_data_type(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1.6')
self.assertEqual(cm.exception.messages[0], 'Enter two numbers.')
self.assertEqual(cm.exception.code, 'invalid')
def test_float_invalid_lower(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '3.1415926'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_invalid_upper(self):
field = pg_forms.FloatRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['1.61803399', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a number.')
def test_float_required(self):
field = pg_forms.FloatRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1.61803399', ''])
self.assertEqual(value, NumericRange(1.61803399, None))
def test_date_lower_bound_higher(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', '1976-04-16'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_date_open(self):
field = pg_forms.DateRangeField()
value = field.clean(['', '2013-04-09'])
self.assertEqual(value, DateRange(None, datetime.date(2013, 4, 9)))
def test_date_incorrect_data_type(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('1')
self.assertEqual(cm.exception.messages[0], 'Enter two valid dates.')
self.assertEqual(cm.exception.code, 'invalid')
def test_date_invalid_lower(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['a', '2013-04-09'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_invalid_upper(self):
field = pg_forms.DateRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09', 'b'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date.')
def test_date_required(self):
field = pg_forms.DateRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['1976-04-16', ''])
self.assertEqual(value, DateRange(datetime.date(1976, 4, 16), None))
def test_datetime_lower_bound_higher(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2006-10-25 14:59', '2006-10-25 14:58'])
self.assertEqual(cm.exception.messages[0], 'The start of the range must not exceed the end of the range.')
self.assertEqual(cm.exception.code, 'bound_ordering')
def test_datetime_open(self):
field = pg_forms.DateTimeRangeField()
value = field.clean(['', '2013-04-09 11:45'])
self.assertEqual(value, DateTimeTZRange(None, datetime.datetime(2013, 4, 9, 11, 45)))
def test_datetime_incorrect_data_type(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean('2013-04-09 11:45')
self.assertEqual(cm.exception.messages[0], 'Enter two valid date/times.')
self.assertEqual(cm.exception.code, 'invalid')
def test_datetime_invalid_lower(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['45', '2013-04-09 11:45'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_invalid_upper(self):
field = pg_forms.DateTimeRangeField()
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['2013-04-09 11:45', 'sweet pickles'])
self.assertEqual(cm.exception.messages[0], 'Enter a valid date/time.')
def test_datetime_required(self):
field = pg_forms.DateTimeRangeField(required=True)
with self.assertRaises(exceptions.ValidationError) as cm:
field.clean(['', ''])
self.assertEqual(cm.exception.messages[0], 'This field is required.')
value = field.clean(['2013-04-09 11:45', ''])
self.assertEqual(value, DateTimeTZRange(datetime.datetime(2013, 4, 9, 11, 45), None))
@override_settings(USE_TZ=True, TIME_ZONE='Africa/Johannesburg')
def test_datetime_prepare_value(self):
field = pg_forms.DateTimeRangeField()
value = field.prepare_value(
DateTimeTZRange(datetime.datetime(2015, 5, 22, 16, 6, 33, tzinfo=timezone.utc), None)
)
self.assertEqual(value, [datetime.datetime(2015, 5, 22, 18, 6, 33), None])
def test_model_field_formfield_integer(self):
model_field = pg_fields.IntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_biginteger(self):
model_field = pg_fields.BigIntegerRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.IntegerRangeField)
def test_model_field_formfield_float(self):
model_field = pg_fields.FloatRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.FloatRangeField)
def test_model_field_formfield_date(self):
model_field = pg_fields.DateRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateRangeField)
def test_model_field_formfield_datetime(self):
model_field = pg_fields.DateTimeRangeField()
form_field = model_field.formfield()
self.assertIsInstance(form_field, pg_forms.DateTimeRangeField)
class TestWidget(PostgreSQLTestCase):
def test_range_widget(self):
f = pg_forms.ranges.DateTimeRangeField()
self.assertHTMLEqual(
f.widget.render('datetimerange', ''),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
self.assertHTMLEqual(
f.widget.render('datetimerange', None),
'<input type="text" name="datetimerange_0" /><input type="text" name="datetimerange_1" />'
)
dt_range = DateTimeTZRange(
datetime.datetime(2006, 1, 10, 7, 30),
datetime.datetime(2006, 2, 12, 9, 50)
)
self.assertHTMLEqual(
f.widget.render('datetimerange', dt_range),
'<input type="text" name="datetimerange_0" value="2006-01-10 07:30:00" />'
'<input type="text" name="datetimerange_1" value="2006-02-12 09:50:00" />'
)
| bsd-3-clause |
meteorcloudy/tensorflow | tensorflow/contrib/learn/python/learn/estimators/composable_model.py | 42 | 15409 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow composable models used as building blocks for estimators (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import list_variables
from tensorflow.contrib.framework import load_variable
from tensorflow.contrib.layers.python.layers import feature_column_ops
from tensorflow.python.framework import ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import nn
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.summary import summary
from tensorflow.python.util.deprecation import deprecated
class _ComposableModel(object):
"""ABC for building blocks that can be used to create estimators.
Subclasses need to implement the following methods:
- build_model
- _get_optimizer
See below for the required signatures.
_ComposableModel and its subclasses are not part of the public tf.learn API.
"""
@deprecated(None, "Please use model_fns in tf.estimator.")
def __init__(self,
num_label_columns,
optimizer,
gradient_clip_norm,
num_ps_replicas,
scope,
trainable=True):
"""Common initialization for all _ComposableModel objects.
Args:
num_label_columns: The number of label columns.
optimizer: An instance of `tf.Optimizer` used to apply gradients to
the model. If `None`, will use a FTRL optimizer.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
num_ps_replicas: The number of parameter server replicas.
scope: Scope for variables created in this model.
trainable: True if this model contains variables that can be trained.
False otherwise (in cases where the variables are used strictly for
transforming input labels for training).
"""
self._num_label_columns = num_label_columns
self._optimizer = optimizer
self._gradient_clip_norm = gradient_clip_norm
self._num_ps_replicas = num_ps_replicas
self._scope = scope
self._trainable = trainable
self._feature_columns = None
def get_scope_name(self):
"""Returns the scope name used by this model for variables."""
return self._scope
def build_model(self, features, feature_columns, is_training):
"""Builds the model that can calculate the logits.
Args:
features: A mapping from feature columns to tensors.
feature_columns: An iterable containing all the feature columns used
by the model. All items in the set should be instances of
classes derived from `FeatureColumn`.
is_training: Set to True when training, False otherwise.
Returns:
The logits for this model.
"""
raise NotImplementedError
def get_train_step(self, loss):
"""Returns the ops to run to perform a training step on this estimator.
Args:
loss: The loss to use when calculating gradients.
Returns:
The ops to run to perform a training step.
"""
my_vars = self._get_vars()
if not (self._get_feature_columns() or my_vars):
return []
grads = gradients.gradients(loss, my_vars)
if self._gradient_clip_norm:
grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
return [self._get_optimizer().apply_gradients(zip(grads, my_vars))]
def _get_feature_columns(self):
if not self._feature_columns:
return None
feature_column_ops.check_feature_columns(self._feature_columns)
return sorted(set(self._feature_columns), key=lambda x: x.key)
def _get_vars(self):
if self._get_feature_columns():
return ops.get_collection(self._scope)
return []
def _get_optimizer(self):
if (self._optimizer is None or isinstance(self._optimizer,
six.string_types)):
optimizer = self._get_default_optimizer(self._optimizer)
elif callable(self._optimizer):
optimizer = self._optimizer()
else:
optimizer = self._optimizer
return optimizer
def _get_default_optimizer(self, optimizer_name=None):
raise NotImplementedError
class LinearComposableModel(_ComposableModel):
"""A _ComposableModel that implements linear regression.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Instances of this class can be used to build estimators through the use
of composition.
"""
def __init__(self,
num_label_columns,
optimizer=None,
_joint_weights=False,
gradient_clip_norm=None,
num_ps_replicas=0,
scope=None,
trainable=True):
"""Initializes LinearComposableModel objects.
Args:
num_label_columns: The number of label columns.
optimizer: An instance of `tf.Optimizer` used to apply gradients to
the model. If `None`, will use a FTRL optimizer.
_joint_weights: If True use a single (possibly partitioned) variable
to store all weights in this model. Faster, but requires that all
feature columns are sparse and have the 'sum' combiner.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
num_ps_replicas: The number of parameter server replicas.
scope: Optional scope for variables created in this model. If scope
is not supplied, it will default to 'linear'.
trainable: True if this model contains variables that can be trained.
False otherwise (in cases where the variables are used strictly for
transforming input labels for training).
"""
scope = "linear" if not scope else scope
super(LinearComposableModel, self).__init__(
num_label_columns=num_label_columns,
optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas,
scope=scope,
trainable=trainable)
self._joint_weights = _joint_weights
def get_weights(self, model_dir):
"""Returns weights per feature of the linear part.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The weights created by this model (without the optimizer weights).
"""
all_variables = [name for name, _ in list_variables(model_dir)]
values = {}
optimizer_regex = r".*/" + self._get_optimizer().get_name() + r"(_\d)?$"
for name in all_variables:
if (name.startswith(self._scope + "/") and
name != self._scope + "/bias_weight" and
not re.match(optimizer_regex, name)):
values[name] = load_variable(model_dir, name)
if len(values) == 1:
return values[list(values.keys())[0]]
return values
def get_bias(self, model_dir):
"""Returns bias of the model.
Args:
model_dir: Directory where model parameters, graph and etc. are saved.
Returns:
The bias weights created by this model.
"""
return load_variable(model_dir, name=(self._scope + "/bias_weight"))
def build_model(self, features, feature_columns, is_training):
"""See base class."""
self._feature_columns = feature_columns
partitioner = partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas, min_slice_size=64 << 20)
with variable_scope.variable_scope(
self._scope, values=features.values(),
partitioner=partitioner) as scope:
if self._joint_weights:
logits, _, _ = layers.joint_weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._get_feature_columns(),
num_outputs=self._num_label_columns,
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
else:
logits, _, _ = layers.weighted_sum_from_feature_columns(
columns_to_tensors=features,
feature_columns=self._get_feature_columns(),
num_outputs=self._num_label_columns,
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
return logits
def _get_default_optimizer(self, optimizer_name=None):
if optimizer_name is None:
optimizer_name = "Ftrl"
default_learning_rate = 1. / math.sqrt(len(self._get_feature_columns()))
default_learning_rate = min(0.2, default_learning_rate)
return layers.OPTIMIZER_CLS_NAMES[optimizer_name](
learning_rate=default_learning_rate)
class DNNComposableModel(_ComposableModel):
"""A _ComposableModel that implements a DNN.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
Instances of this class can be used to build estimators through the use
of composition.
"""
def __init__(self,
num_label_columns,
hidden_units,
optimizer=None,
activation_fn=nn.relu,
dropout=None,
gradient_clip_norm=None,
num_ps_replicas=0,
scope=None,
trainable=True):
"""Initializes DNNComposableModel objects.
Args:
num_label_columns: The number of label columns.
hidden_units: List of hidden units per layer. All layers are fully
connected.
optimizer: An instance of `tf.Optimizer` used to apply gradients to
the model. If `None`, will use a FTRL optimizer.
activation_fn: Activation function applied to each layer. If `None`,
will use `tf.nn.relu`.
dropout: When not None, the probability we will drop out
a given coordinate.
gradient_clip_norm: A float > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
tf.clip_by_global_norm for more details.
num_ps_replicas: The number of parameter server replicas.
      scope: Optional scope for variables created in this model. If no scope
        is supplied, one is generated.
trainable: True if this model contains variables that can be trained.
False otherwise (in cases where the variables are used strictly for
transforming input labels for training).
"""
scope = "dnn" if not scope else scope
super(DNNComposableModel, self).__init__(
num_label_columns=num_label_columns,
optimizer=optimizer,
gradient_clip_norm=gradient_clip_norm,
num_ps_replicas=num_ps_replicas,
scope=scope,
trainable=trainable)
self._hidden_units = hidden_units
self._activation_fn = activation_fn
self._dropout = dropout
def get_weights(self, model_dir):
"""Returns the weights of the model.
Args:
      model_dir: Directory where model parameters, graph, etc. are saved.
Returns:
The weights created by this model.
"""
return [
load_variable(
model_dir, name=(self._scope + "/hiddenlayer_%d/weights" % i))
for i, _ in enumerate(self._hidden_units)
] + [load_variable(
model_dir, name=(self._scope + "/logits/weights"))]
def get_bias(self, model_dir):
"""Returns the bias of the model.
Args:
      model_dir: Directory where model parameters, graph, etc. are saved.
Returns:
The bias weights created by this model.
"""
return [
load_variable(
model_dir, name=(self._scope + "/hiddenlayer_%d/biases" % i))
for i, _ in enumerate(self._hidden_units)
] + [load_variable(
model_dir, name=(self._scope + "/logits/biases"))]
def _add_hidden_layer_summary(self, value, tag):
# TODO(zakaria): Move this code to tf.learn and add test.
summary.scalar("%s/fraction_of_zero_values" % tag, nn.zero_fraction(value))
summary.histogram("%s/activation" % tag, value)
def build_model(self, features, feature_columns, is_training):
"""See base class."""
self._feature_columns = feature_columns
input_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas, min_slice_size=64 << 20))
with variable_scope.variable_scope(
self._scope + "/input_from_feature_columns",
values=features.values(),
partitioner=input_layer_partitioner) as scope:
net = layers.input_from_feature_columns(
features,
self._get_feature_columns(),
weight_collections=[self._scope],
trainable=self._trainable,
scope=scope)
hidden_layer_partitioner = (
partitioned_variables.min_max_variable_partitioner(
max_partitions=self._num_ps_replicas))
for layer_id, num_hidden_units in enumerate(self._hidden_units):
with variable_scope.variable_scope(
self._scope + "/hiddenlayer_%d" % layer_id,
values=[net],
partitioner=hidden_layer_partitioner) as scope:
net = layers.fully_connected(
net,
num_hidden_units,
activation_fn=self._activation_fn,
variables_collections=[self._scope],
trainable=self._trainable,
scope=scope)
if self._dropout is not None and is_training:
net = layers.dropout(net, keep_prob=(1.0 - self._dropout))
self._add_hidden_layer_summary(net, scope.name)
with variable_scope.variable_scope(
self._scope + "/logits",
values=[net],
partitioner=hidden_layer_partitioner) as scope:
logits = layers.fully_connected(
net,
self._num_label_columns,
activation_fn=None,
variables_collections=[self._scope],
trainable=self._trainable,
scope=scope)
self._add_hidden_layer_summary(logits, "logits")
return logits
def _get_default_optimizer(self, optimizer_name=None):
if optimizer_name is None:
optimizer_name = "Adagrad"
return layers.OPTIMIZER_CLS_NAMES[optimizer_name](learning_rate=0.05)
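# Illustrative composition sketch (not part of the module; `features`,
# `linear_columns` and `dnn_columns` are placeholder inputs in the shapes
# build_model() expects):
#
#   linear = LinearComposableModel(num_label_columns=1)
#   dnn = DNNComposableModel(num_label_columns=1, hidden_units=[64, 32])
#   logits = (linear.build_model(features, linear_columns, is_training=True)
#             + dnn.build_model(features, dnn_columns, is_training=True))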
| apache-2.0 |
donald-pinckney/EM-Simulator | EM Sim/EM SimContent/Lib/encodings/cp857.py | 593 | 34164 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP857.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp857',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
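# Note (illustrative): the stdlib `encodings` package imports this module by
# name and calls getregentry() above, so a plain codecs.lookup('cp857') is
# enough to route all 'cp857' conversions through the classes defined here.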
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x00e0, # LATIN SMALL LETTER A WITH GRAVE
0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x00ea, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x00e8, # LATIN SMALL LETTER E WITH GRAVE
0x008b: 0x00ef, # LATIN SMALL LETTER I WITH DIAERESIS
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0131, # LATIN SMALL LETTER DOTLESS I
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x00f2, # LATIN SMALL LETTER O WITH GRAVE
0x0096: 0x00fb, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x0097: 0x00f9, # LATIN SMALL LETTER U WITH GRAVE
0x0098: 0x0130, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
0x009c: 0x00a3, # POUND SIGN
0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
0x009e: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x009f: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x00f1, # LATIN SMALL LETTER N WITH TILDE
0x00a5: 0x00d1, # LATIN CAPITAL LETTER N WITH TILDE
0x00a6: 0x011e, # LATIN CAPITAL LETTER G WITH BREVE
0x00a7: 0x011f, # LATIN SMALL LETTER G WITH BREVE
0x00a8: 0x00bf, # INVERTED QUESTION MARK
0x00a9: 0x00ae, # REGISTERED SIGN
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
0x00ad: 0x00a1, # INVERTED EXCLAMATION MARK
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x00c0, # LATIN CAPITAL LETTER A WITH GRAVE
0x00b8: 0x00a9, # COPYRIGHT SIGN
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x00a2, # CENT SIGN
0x00be: 0x00a5, # YEN SIGN
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x00e3, # LATIN SMALL LETTER A WITH TILDE
0x00c7: 0x00c3, # LATIN CAPITAL LETTER A WITH TILDE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x00ba, # MASCULINE ORDINAL INDICATOR
0x00d1: 0x00aa, # FEMININE ORDINAL INDICATOR
0x00d2: 0x00ca, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x00c8, # LATIN CAPITAL LETTER E WITH GRAVE
0x00d5: None, # UNDEFINED
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x00cf, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x00a6, # BROKEN BAR
0x00de: 0x00cc, # LATIN CAPITAL LETTER I WITH GRAVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x00d2, # LATIN CAPITAL LETTER O WITH GRAVE
0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
0x00e6: 0x00b5, # MICRO SIGN
0x00e7: None, # UNDEFINED
0x00e8: 0x00d7, # MULTIPLICATION SIGN
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x00db, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00eb: 0x00d9, # LATIN CAPITAL LETTER U WITH GRAVE
0x00ed: 0x00ff, # LATIN SMALL LETTER Y WITH DIAERESIS
0x00ee: 0x00af, # MACRON
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x00b1, # PLUS-MINUS SIGN
0x00f2: None, # UNDEFINED
0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
0x00f4: 0x00b6, # PILCROW SIGN
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x00b7, # MIDDLE DOT
0x00fb: 0x00b9, # SUPERSCRIPT ONE
0x00fc: 0x00b3, # SUPERSCRIPT THREE
0x00fd: 0x00b2, # SUPERSCRIPT TWO
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x0085 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xea' # 0x0088 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x008a -> LATIN SMALL LETTER E WITH GRAVE
u'\xef' # 0x008b -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0131' # 0x008d -> LATIN SMALL LETTER DOTLESS I
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
u'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0x0095 -> LATIN SMALL LETTER O WITH GRAVE
u'\xfb' # 0x0096 -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xf9' # 0x0097 -> LATIN SMALL LETTER U WITH GRAVE
u'\u0130' # 0x0098 -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
u'\xa3' # 0x009c -> POUND SIGN
u'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
u'\u015e' # 0x009e -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u015f' # 0x009f -> LATIN SMALL LETTER S WITH CEDILLA
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\xf1' # 0x00a4 -> LATIN SMALL LETTER N WITH TILDE
u'\xd1' # 0x00a5 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u011e' # 0x00a6 -> LATIN CAPITAL LETTER G WITH BREVE
u'\u011f' # 0x00a7 -> LATIN SMALL LETTER G WITH BREVE
u'\xbf' # 0x00a8 -> INVERTED QUESTION MARK
u'\xae' # 0x00a9 -> REGISTERED SIGN
u'\xac' # 0x00aa -> NOT SIGN
u'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
u'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
u'\xa1' # 0x00ad -> INVERTED EXCLAMATION MARK
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc0' # 0x00b7 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xa9' # 0x00b8 -> COPYRIGHT SIGN
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\xa2' # 0x00bd -> CENT SIGN
u'\xa5' # 0x00be -> YEN SIGN
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\xe3' # 0x00c6 -> LATIN SMALL LETTER A WITH TILDE
u'\xc3' # 0x00c7 -> LATIN CAPITAL LETTER A WITH TILDE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\xba' # 0x00d0 -> MASCULINE ORDINAL INDICATOR
u'\xaa' # 0x00d1 -> FEMININE ORDINAL INDICATOR
u'\xca' # 0x00d2 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x00d4 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\ufffe' # 0x00d5 -> UNDEFINED
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x00d8 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\xa6' # 0x00dd -> BROKEN BAR
u'\xcc' # 0x00de -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd2' # 0x00e3 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
u'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xb5' # 0x00e6 -> MICRO SIGN
u'\ufffe' # 0x00e7 -> UNDEFINED
u'\xd7' # 0x00e8 -> MULTIPLICATION SIGN
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0x00ea -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0x00eb -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xec' # 0x00ec -> LATIN SMALL LETTER I WITH GRAVE
u'\xff' # 0x00ed -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xaf' # 0x00ee -> MACRON
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
u'\ufffe' # 0x00f2 -> UNDEFINED
u'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
u'\xb6' # 0x00f4 -> PILCROW SIGN
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\xb7' # 0x00fa -> MIDDLE DOT
u'\xb9' # 0x00fb -> SUPERSCRIPT ONE
u'\xb3' # 0x00fc -> SUPERSCRIPT THREE
u'\xb2' # 0x00fd -> SUPERSCRIPT TWO
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a1: 0x00ad, # INVERTED EXCLAMATION MARK
0x00a2: 0x00bd, # CENT SIGN
0x00a3: 0x009c, # POUND SIGN
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a5: 0x00be, # YEN SIGN
0x00a6: 0x00dd, # BROKEN BAR
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00a9: 0x00b8, # COPYRIGHT SIGN
0x00aa: 0x00d1, # FEMININE ORDINAL INDICATOR
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00ae: 0x00a9, # REGISTERED SIGN
0x00af: 0x00ee, # MACRON
0x00b0: 0x00f8, # DEGREE SIGN
0x00b1: 0x00f1, # PLUS-MINUS SIGN
0x00b2: 0x00fd, # SUPERSCRIPT TWO
0x00b3: 0x00fc, # SUPERSCRIPT THREE
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b5: 0x00e6, # MICRO SIGN
0x00b6: 0x00f4, # PILCROW SIGN
0x00b7: 0x00fa, # MIDDLE DOT
0x00b8: 0x00f7, # CEDILLA
0x00b9: 0x00fb, # SUPERSCRIPT ONE
0x00ba: 0x00d0, # MASCULINE ORDINAL INDICATOR
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00bc: 0x00ac, # VULGAR FRACTION ONE QUARTER
0x00bd: 0x00ab, # VULGAR FRACTION ONE HALF
0x00be: 0x00f3, # VULGAR FRACTION THREE QUARTERS
0x00bf: 0x00a8, # INVERTED QUESTION MARK
0x00c0: 0x00b7, # LATIN CAPITAL LETTER A WITH GRAVE
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c3: 0x00c7, # LATIN CAPITAL LETTER A WITH TILDE
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c5: 0x008f, # LATIN CAPITAL LETTER A WITH RING ABOVE
0x00c6: 0x0092, # LATIN CAPITAL LIGATURE AE
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c8: 0x00d4, # LATIN CAPITAL LETTER E WITH GRAVE
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00ca: 0x00d2, # LATIN CAPITAL LETTER E WITH CIRCUMFLEX
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cc: 0x00de, # LATIN CAPITAL LETTER I WITH GRAVE
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00cf: 0x00d8, # LATIN CAPITAL LETTER I WITH DIAERESIS
0x00d1: 0x00a5, # LATIN CAPITAL LETTER N WITH TILDE
0x00d2: 0x00e3, # LATIN CAPITAL LETTER O WITH GRAVE
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d5: 0x00e5, # LATIN CAPITAL LETTER O WITH TILDE
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x00e8, # MULTIPLICATION SIGN
0x00d8: 0x009d, # LATIN CAPITAL LETTER O WITH STROKE
0x00d9: 0x00eb, # LATIN CAPITAL LETTER U WITH GRAVE
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00db: 0x00ea, # LATIN CAPITAL LETTER U WITH CIRCUMFLEX
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e0: 0x0085, # LATIN SMALL LETTER A WITH GRAVE
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e3: 0x00c6, # LATIN SMALL LETTER A WITH TILDE
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e5: 0x0086, # LATIN SMALL LETTER A WITH RING ABOVE
0x00e6: 0x0091, # LATIN SMALL LIGATURE AE
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e8: 0x008a, # LATIN SMALL LETTER E WITH GRAVE
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00ea: 0x0088, # LATIN SMALL LETTER E WITH CIRCUMFLEX
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ec: 0x00ec, # LATIN SMALL LETTER I WITH GRAVE
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00ef: 0x008b, # LATIN SMALL LETTER I WITH DIAERESIS
0x00f1: 0x00a4, # LATIN SMALL LETTER N WITH TILDE
0x00f2: 0x0095, # LATIN SMALL LETTER O WITH GRAVE
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f5: 0x00e4, # LATIN SMALL LETTER O WITH TILDE
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00f8: 0x009b, # LATIN SMALL LETTER O WITH STROKE
0x00f9: 0x0097, # LATIN SMALL LETTER U WITH GRAVE
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fb: 0x0096, # LATIN SMALL LETTER U WITH CIRCUMFLEX
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00ff: 0x00ed, # LATIN SMALL LETTER Y WITH DIAERESIS
0x011e: 0x00a6, # LATIN CAPITAL LETTER G WITH BREVE
0x011f: 0x00a7, # LATIN SMALL LETTER G WITH BREVE
0x0130: 0x0098, # LATIN CAPITAL LETTER I WITH DOT ABOVE
0x0131: 0x008d, # LATIN SMALL LETTER DOTLESS I
0x015e: 0x009e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x009f, # LATIN SMALL LETTER S WITH CEDILLA
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
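if __name__ == "__main__":
    # Illustrative round-trip check (not part of the generated file; Python 2
    # bytes semantics): the Turkish letters live in the 0x98-0x9f slots above.
    assert codecs.charmap_encode(u'\u015e', 'strict', encoding_map)[0] == '\x9e'
    assert codecs.charmap_decode('\x9e', 'strict', decoding_table)[0] == u'\u015e'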
| apache-2.0 |
shantanu561993/volatility | volatility/plugins/mac/session_hash_table.py | 12 | 2938 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: atcuno@gmail.com
@organization:
"""
import volatility.plugins.mac.pslist as pslist
import volatility.obj as obj
import volatility.plugins.mac.common as common
from volatility.renderers import TreeGrid
class mac_list_sessions(pslist.mac_pslist):
""" Enumerates sessions """
def calculate(self):
common.set_plugin_members(self)
shash_addr = self.addr_space.profile.get_symbol("_sesshash")
shash = obj.Object("unsigned long", offset = shash_addr, vm = self.addr_space)
shashtbl_addr = self.addr_space.profile.get_symbol("_sesshashtbl")
shashtbl_ptr = obj.Object("Pointer", offset = shashtbl_addr, vm = self.addr_space)
shash_array = obj.Object(theType = "Array", targetType = "sesshashhead", count = shash + 1, vm = self.addr_space, offset = shashtbl_ptr)
for sess in shash_array:
s = sess.lh_first
while s:
yield s
s = s.s_hash.le_next
def unified_output(self, data):
return TreeGrid([("Leader (Pid)", int),
("Leader (Name)", str),
("Login Name", str),
], self.generator(data))
def generator(self, data):
for sess in data:
pid = -1
pname = "<INVALID LEADER>"
if sess.s_leader:
pid = sess.s_leader.p_pid
pname = sess.s_leader.p_comm
yield(0, [
int(pid),
str(pname),
str(sess.s_login),
])
def render_text(self, outfd, data):
self.table_header(outfd, [("Leader (Pid)", "8"),
("Leader (Name)", "20"),
("Login Name", "25")])
for sess in data:
pid = -1
pname = "<INVALID LEADER>"
if sess.s_leader:
pid = sess.s_leader.p_pid
pname = sess.s_leader.p_comm
self.table_row(outfd, pid, pname, sess.s_login)
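# Example invocation (illustrative; the profile and image names below are
# placeholders, not shipped defaults):
#
#   vol.py --profile=<MacProfile> -f mac_memory.dmp mac_list_sessions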
| gpl-2.0 |
Learningtribes/edx-platform | common/lib/xmodule/xmodule/randomize_module.py | 27 | 3876 | import logging
import random
from xmodule.x_module import XModule, STUDENT_VIEW
from xmodule.seq_module import SequenceDescriptor
from lxml import etree
from xblock.fields import Scope, Integer
from xblock.fragment import Fragment
log = logging.getLogger('edx.' + __name__)
class RandomizeFields(object):
choice = Integer(help="Which random child was chosen", scope=Scope.user_state)
class RandomizeModule(RandomizeFields, XModule):
"""
Chooses a random child module. Chooses the same one every time for each student.
Example:
<randomize>
<problem url_name="problem1" />
<problem url_name="problem2" />
<problem url_name="problem3" />
</randomize>
User notes:
- If you're randomizing amongst graded modules, each of them MUST be worth the same
number of points. Otherwise, the earth will be overrun by monsters from the
deeps. You have been warned.
Technical notes:
- There is more dark magic in this code than I'd like. The whole varying-children +
grading interaction is a tangle between super and subclasses of descriptors and
modules.
"""
def __init__(self, *args, **kwargs):
super(RandomizeModule, self).__init__(*args, **kwargs)
# NOTE: calling self.get_children() doesn't work until we've picked a choice
num_choices = len(self.descriptor.get_children())
if self.choice > num_choices:
# Oops. Children changed. Reset.
self.choice = None
if self.choice is None:
# choose one based on the system seed, or randomly if that's not available
if num_choices > 0:
if self.system.seed is not None:
self.choice = self.system.seed % num_choices
else:
self.choice = random.randrange(0, num_choices)
if self.choice is not None:
# Now get_children() should return a list with one element
log.debug("children of randomize module (should be only 1): %s", self.child)
@property
def child_descriptor(self):
""" Return descriptor of selected choice """
if self.choice is None:
return None
return self.descriptor.get_children()[self.choice]
@property
def child(self):
""" Return module instance of selected choice """
child_descriptor = self.child_descriptor
if child_descriptor is None:
return None
return self.system.get_module(child_descriptor)
def get_child_descriptors(self):
"""
For grading--return just the chosen child.
"""
if self.child_descriptor is None:
return []
return [self.child_descriptor]
def student_view(self, context):
if self.child is None:
# raise error instead? In fact, could complain on descriptor load...
return Fragment(content=u"<div>Nothing to randomize between</div>")
return self.child.render(STUDENT_VIEW, context)
def get_icon_class(self):
return self.child.get_icon_class() if self.child else 'other'
class RandomizeDescriptor(RandomizeFields, SequenceDescriptor):
# the editing interface can be the same as for sequences -- just a container
module_class = RandomizeModule
resources_dir = None
filename_extension = "xml"
show_in_read_only_mode = True
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('randomize')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
return xml_object
def has_dynamic_children(self):
"""
Grading needs to know that only one of the children is actually "real". This
makes it use module.get_child_descriptors().
"""
return True
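# Illustrative note: the choice is stable per student because it is derived
# from the runtime seed, e.g. with the three-problem <randomize> block from
# the docstring and system.seed == 7, every render picks child 7 % 3 == 1.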
| agpl-3.0 |
zentner-kyle/servo | tests/wpt/harness/wptrunner/manifestinclude.py | 39 | 4804 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""Manifest structure used to store paths that should be included in a test run.
The manifest is represented by a tree of IncludeManifest objects, the root
representing the file and each subnode representing a subdirectory that should
be included or excluded.
"""
import os
import urlparse
from wptmanifest.node import DataNode
from wptmanifest.backends import conditional
from wptmanifest.backends.conditional import ManifestItem
class IncludeManifest(ManifestItem):
def __init__(self, node):
"""Node in a tree structure representing the paths
that should be included or excluded from the test run.
:param node: AST Node corresponding to this Node.
"""
ManifestItem.__init__(self, node)
self.child_map = {}
@classmethod
def create(cls):
"""Create an empty IncludeManifest tree"""
node = DataNode(None)
return cls(node)
def append(self, child):
ManifestItem.append(self, child)
self.child_map[child.name] = child
assert len(self.child_map) == len(self.children)
def include(self, test):
"""Return a boolean indicating whether a particular test should be
included in a test run, based on the IncludeManifest tree rooted on
this object.
:param test: The test object"""
path_components = self._get_components(test.url)
return self._include(test, path_components)
def _include(self, test, path_components):
if path_components:
next_path_part = path_components.pop()
if next_path_part in self.child_map:
return self.child_map[next_path_part]._include(test, path_components)
node = self
while node:
try:
skip_value = self.get("skip", {"test_type": test.item_type}).lower()
assert skip_value in ("true", "false")
return False if skip_value == "true" else True
except KeyError:
if node.parent is not None:
node = node.parent
else:
# Include by default
return True
def _get_components(self, url):
rv = []
url_parts = urlparse.urlsplit(url)
variant = ""
if url_parts.query:
variant += "?" + url_parts.query
if url_parts.fragment:
variant += "#" + url_parts.fragment
if variant:
rv.append(variant)
rv.extend([item for item in reversed(url_parts.path.split("/")) if item])
return rv
def _add_rule(self, test_manifests, url, direction):
maybe_path = os.path.join(os.path.abspath(os.curdir), url)
rest, last = os.path.split(maybe_path)
variant = ""
if "#" in last:
last, fragment = last.rsplit("#", 1)
variant += "#" + fragment
if "?" in last:
last, query = last.rsplit("?", 1)
variant += "?" + query
maybe_path = os.path.join(rest, last)
if os.path.exists(maybe_path):
for manifest, data in test_manifests.iteritems():
rel_path = os.path.relpath(maybe_path, data["tests_path"])
if ".." not in rel_path.split(os.sep):
url = data["url_base"] + rel_path.replace(os.path.sep, "/") + variant
break
assert direction in ("include", "exclude")
components = self._get_components(url)
node = self
while components:
component = components.pop()
if component not in node.child_map:
new_node = IncludeManifest(DataNode(component))
node.append(new_node)
node = node.child_map[component]
skip = False if direction == "include" else True
node.set("skip", str(skip))
def add_include(self, test_manifests, url_prefix):
"""Add a rule indicating that tests under a url path
should be included in test runs
:param url_prefix: The url prefix to include
"""
return self._add_rule(test_manifests, url_prefix, "include")
def add_exclude(self, test_manifests, url_prefix):
"""Add a rule indicating that tests under a url path
should be excluded from test runs
:param url_prefix: The url prefix to exclude
"""
return self._add_rule(test_manifests, url_prefix, "exclude")
def get_manifest(manifest_path):
with open(manifest_path) as f:
return conditional.compile(f, data_cls_getter=lambda x, y: IncludeManifest)
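# Illustrative usage sketch (assumes `test_manifests` maps each manifest to
# its {"tests_path": ..., "url_base": ...} data as the methods above expect,
# and `test` is a loaded test object):
#
#   manifest = IncludeManifest.create()
#   manifest.add_exclude(test_manifests, "/dom/")
#   manifest.add_include(test_manifests, "/dom/historical.html")
#   manifest.include(test)  # True only for tests under the re-included path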
| mpl-2.0 |
mulkieran/justbytes | doc/conf.py | 1 | 9393 | # -*- coding: utf-8 -*-
#
# justbytes documentation build configuration file, created by
# sphinx-quickstart on Tue Feb 17 09:02:33 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
import justbytes
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.pardir))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.coverage",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"justbytes"
copyright = u"2015, mulhern"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ".".join(justbytes.__version__.split(".")[:2])
# The full version, including alpha/beta/rc tags.
release = justbytes.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = "justbytesdoc"
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "justbytes.tex", u"justbytes Documentation", u"mulhern", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "justbytes", u"justbytes Documentation", [u"mulhern"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
"index",
"justbytes",
u"justbytes Documentation",
u"mulhern",
"justbytes",
"One line description of project.",
"Miscellaneous",
),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u"justbytes"
epub_author = u"mulhern"
epub_publisher = u"mulhern"
epub_copyright = u"2015, mulhern"
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
# epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"http://docs.python.org/": None}
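# Illustrative build command (run from this doc/ directory; the _build output
# directory matches exclude_patterns above):
#
#   sphinx-build -b html . _build/html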
| gpl-2.0 |
ther12k/android-client | phone/jni/pjsip/sources/pkgconfig.py | 75 | 4188 | import sys
import os
REMOVE_THESE = ["-I/usr/include", "-I/usr/include/", "-L/usr/lib", "-L/usr/lib/"]
class Pkg:
def __init__(self, pkg_name):
self.name = pkg_name
self.priority = 0
self.vars = {}
def parse(self, pkg_config_path):
f = None
for pkg_path in pkg_config_path.split(':'):
if pkg_path[-1] != '/':
pkg_path += '/'
fname = pkg_path + self.name + '.pc'
try:
f = open(fname, "r")
break
except:
continue
if not f:
#sys.stderr.write("pkgconfig.py: unable to find %s.pc in %s\n" % (self.name, pkg_config_path))
return False
for line in f.readlines():
line = line.strip()
if not line:
continue
if line[0]=='#':
continue
pos1 = line.find('=')
pos2 = line.find(':')
if pos1 > 0 and (pos1 < pos2 or pos2 < 0):
pos = pos1
elif pos2 > 0 and (pos2 < pos1 or pos1 < 0):
pos = pos2
else:
continue
name = line[:pos].lower()
value = line[pos+1:]
self.vars[name] = value
f.close()
for name in self.vars.keys():
value = self.vars[name]
while True:
pos1 = value.find("${")
if pos1 < 0:
break
pos2 = value.find("}")
if pos2 < 0:
break
value = value.replace(value[pos1:pos2+1], self.vars[value[pos1+2:pos2]])
self.vars[name] = value
return True
def requires(self):
if not 'requires' in self.vars:
return []
deps = []
req_list = self.vars['requires']
for req_item in req_list.split(','):
req_item = req_item.strip()
for i in range(len(req_item)):
if "=<>".find(req_item[i]) >= 0:
deps.append(req_item[:i].strip())
break
return deps
def libs(self):
if not 'libs' in self.vars:
return []
return self.vars['libs'].split(' ')
def cflags(self):
if not 'cflags' in self.vars:
return []
return self.vars['cflags'].split(' ')
def calculate_pkg_priority(pkg, pkg_dict, loop_cnt):
if loop_cnt > 10:
sys.stderr.write("Circular dependency with pkg %s\n" % (pkg))
return 0
reqs = pkg.requires()
prio = 1
for req in reqs:
if not req in pkg_dict:
continue
req_pkg = pkg_dict[req]
prio += calculate_pkg_priority(req_pkg, pkg_dict, loop_cnt+1)
return prio
if __name__ == "__main__":
pkg_names = []
pkg_dict = {}
commands = []
exist_check = False
for i in range(1,len(sys.argv)):
if sys.argv[i][0] == '-':
cmd = sys.argv[i]
commands.append(cmd)
if cmd=='--exists':
exist_check = True
elif cmd=="--help":
print "This is not very helpful, is it"
sys.exit(0)
elif cmd=="--version":
print "0.1"
sys.exit(0)
else:
pkg_names.append(sys.argv[i])
# Fix search path
PKG_CONFIG_PATH = os.getenv("PKG_CONFIG_PATH", "").strip()
if not PKG_CONFIG_PATH:
PKG_CONFIG_PATH="/usr/local/lib/pkgconfig:/usr/lib/pkgconfig"
PKG_CONFIG_PATH = PKG_CONFIG_PATH.replace(";", ":")
# Parse files
for pkg_name in pkg_names:
pkg = Pkg(pkg_name)
if not pkg.parse(PKG_CONFIG_PATH):
sys.exit(1)
pkg_dict[pkg_name] = pkg
if exist_check:
sys.exit(0)
# Calculate priority based on dependency
for pkg_name in pkg_dict.keys():
pkg = pkg_dict[pkg_name]
pkg.priority = calculate_pkg_priority(pkg, pkg_dict, 1)
# Sort package based on dependency
pkg_names = sorted(pkg_names, key=lambda pkg_name: pkg_dict[pkg_name].priority, reverse=True)
# Get the options
opts = []
for cmd in commands:
if cmd=='--libs':
for pkg_name in pkg_names:
libs = pkg_dict[pkg_name].libs()
for lib in libs:
opts.append(lib)
if lib[:2]=="-l":
break
for pkg_name in pkg_names:
opts += pkg_dict[pkg_name].libs()
elif cmd=='--cflags':
for pkg_name in pkg_names:
opts += pkg_dict[pkg_name].cflags()
elif cmd[0]=='-':
sys.stderr.write("pkgconfig.py: I don't know how to handle " + sys.argv[i] + "\n")
filtered_opts = []
for opt in opts:
opt = opt.strip()
if not opt:
continue
if REMOVE_THESE.count(opt) != 0:
continue
if opt != '-framework' and opt != '--framework' and filtered_opts.count(opt) != 0:
if len(filtered_opts) and (filtered_opts[-1] == '-framework' or filtered_opts[-1] == '--framework'):
filtered_opts.pop()
continue
filtered_opts.append(opt)
print ' '.join(filtered_opts)
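# Example invocation (illustrative; assumes a libpjproject.pc file is
# somewhere on PKG_CONFIG_PATH):
#
#   python pkgconfig.py --cflags --libs libpjproject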
| gpl-3.0 |
sloanyang/gyp | test/variables/commands/gyptest-commands.py | 311 | 1208 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test variable expansion of '<!()' syntax commands.
"""
import os
import TestGyp
test = TestGyp.TestGyp(format='gypd')
expect = test.read('commands.gyp.stdout').replace('\r', '')
test.run_gyp('commands.gyp',
'--debug', 'variables',
stdout=expect, ignore_line_numbers=True)
# Verify the commands.gypd against the checked-in expected contents.
#
# Normally, we should canonicalize line endings in the expected
# contents file setting the Subversion svn:eol-style to native,
# but that would still fail if multiple systems are sharing a single
# workspace on a network-mounted file system. Consequently, we
# massage the Windows line endings ('\r\n') in the output to the
# checked-in UNIX endings ('\n').
contents = test.read('commands.gypd').replace('\r', '')
expect = test.read('commands.gypd.golden').replace('\r', '')
if not test.match(contents, expect):
print "Unexpected contents of `commands.gypd'"
test.diff(expect, contents, 'commands.gypd ')
test.fail_test()
test.pass_test()
| bsd-3-clause |
samdoran/ansible | lib/ansible/utils/path.py | 32 | 3357 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from errno import EEXIST
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_native, to_text
__all__ = ['unfrackpath', 'makedirs_safe']
def unfrackpath(path, follow=True):
'''
Returns a path that is free of symlinks (if follow=True), environment variables, relative path traversals and symbols (~)
:arg path: A byte or text string representing a path to be canonicalized
    :arg follow: A boolean indicating whether symlinks should be resolved or not
:raises UnicodeDecodeError: If the canonicalized version of the path
contains non-utf8 byte sequences.
    :rtype: A text string (unicode on python2, str on python3).
:returns: An absolute path with symlinks, environment variables, and tilde
expanded. Note that this does not check whether a path exists.
example::
'$HOME/../../var/mail' becomes '/var/spool/mail'
'''
if follow:
final_path = os.path.normpath(os.path.realpath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
else:
final_path = os.path.normpath(os.path.abspath(os.path.expanduser(os.path.expandvars(to_bytes(path, errors='surrogate_or_strict')))))
return to_text(final_path, errors='surrogate_or_strict')
def makedirs_safe(path, mode=None):
'''Safe way to create dirs in multiprocess/thread environments.
:arg path: A byte or text string representing a directory to be created
:kwarg mode: If given, the mode to set the directory to
:raises AnsibleError: If the directory cannot be created and does not already exist.
:raises UnicodeDecodeError: if the path is not decodable in the utf-8 encoding.
'''
rpath = unfrackpath(path)
b_rpath = to_bytes(rpath)
if not os.path.exists(b_rpath):
try:
if mode:
os.makedirs(b_rpath, mode)
else:
os.makedirs(b_rpath)
except OSError as e:
if e.errno != EEXIST:
raise AnsibleError("Unable to create local directories(%s): %s" % (to_native(rpath), to_native(e)))
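# A hedged usage sketch of the two helpers above (the path below is
# hypothetical; the expansion result depends on the environment):
#
#   cache_dir = unfrackpath('$HOME/.ansible/tmp')  # -> e.g. u'/home/user/.ansible/tmp'
#   makedirs_safe(cache_dir, mode=0o700)           # safe even if another worker races us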
def basedir(source):
""" returns directory for inventory or playbook """
dname = None
if os.path.isdir(source):
dname = source
elif source in [None, '', '.']:
dname = os.getcwd()
elif os.path.isfile(source):
dname = os.path.dirname(source)
if dname:
# don't follow symlinks for basedir, enables source re-use
dname = os.path.abspath(dname)
return dname
| gpl-3.0 |
kidmaple/CoolWall | user/python/Lib/plat-linux2/SOCKET.py | 4 | 3569 | # Generated by h2py from /usr/include/sys/socket.h
# Included from features.h
_FEATURES_H = 1
_GNU_SOURCE = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__GNU_LIBRARY__ = 1
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __P(args): return args
def __P(args): return args
def __P(args): return ()
def __STRING(x): return #x
def __STRING(x): return "x"
# Included from linux/socket.h
# Included from asm/socket.h
# Included from asm/sockios.h
FIOSETOWN = 0x8901
SIOCSPGRP = 0x8902
FIOGETOWN = 0x8903
SIOCGPGRP = 0x8904
SIOCATMARK = 0x8905
SIOCGSTAMP = 0x8906
SOL_SOCKET = 1
SO_DEBUG = 1
SO_REUSEADDR = 2
SO_TYPE = 3
SO_ERROR = 4
SO_DONTROUTE = 5
SO_BROADCAST = 6
SO_SNDBUF = 7
SO_RCVBUF = 8
SO_KEEPALIVE = 9
SO_OOBINLINE = 10
SO_NO_CHECK = 11
SO_PRIORITY = 12
SO_LINGER = 13
SO_BSDCOMPAT = 14
# Included from linux/sockios.h
SIOCADDRT = 0x890B
SIOCDELRT = 0x890C
SIOCGIFNAME = 0x8910
SIOCSIFLINK = 0x8911
SIOCGIFCONF = 0x8912
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFDSTADDR = 0x8917
SIOCSIFDSTADDR = 0x8918
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFMETRIC = 0x891d
SIOCSIFMETRIC = 0x891e
SIOCGIFMEM = 0x891f
SIOCSIFMEM = 0x8920
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCSIFHWADDR = 0x8924
SIOCGIFENCAP = 0x8925
SIOCSIFENCAP = 0x8926
SIOCGIFHWADDR = 0x8927
SIOCGIFSLAVE = 0x8929
SIOCSIFSLAVE = 0x8930
SIOCADDMULTI = 0x8931
SIOCDELMULTI = 0x8932
SIOCGIFBR = 0x8940
SIOCSIFBR = 0x8941
OLD_SIOCDARP = 0x8950
OLD_SIOCGARP = 0x8951
OLD_SIOCSARP = 0x8952
SIOCDARP = 0x8953
SIOCGARP = 0x8954
SIOCSARP = 0x8955
SIOCDRARP = 0x8960
SIOCGRARP = 0x8961
SIOCSRARP = 0x8962
SIOCGIFMAP = 0x8970
SIOCSIFMAP = 0x8971
SIOCADDDLCI = 0x8980
SIOCDELDLCI = 0x8981
SIOCDEVPRIVATE = 0x89F0
SIOCPROTOPRIVATE = 0x89E0
# Included from linux/uio.h
UIO_MAXIOV = 16
SCM_RIGHTS = 1
SOCK_STREAM = 1
SOCK_DGRAM = 2
SOCK_RAW = 3
SOCK_RDM = 4
SOCK_SEQPACKET = 5
SOCK_PACKET = 10
AF_UNSPEC = 0
AF_UNIX = 1
AF_INET = 2
AF_AX25 = 3
AF_IPX = 4
AF_APPLETALK = 5
AF_NETROM = 6
AF_BRIDGE = 7
AF_AAL5 = 8
AF_X25 = 9
AF_INET6 = 10
AF_MAX = 12
PF_UNSPEC = AF_UNSPEC
PF_UNIX = AF_UNIX
PF_INET = AF_INET
PF_AX25 = AF_AX25
PF_IPX = AF_IPX
PF_APPLETALK = AF_APPLETALK
PF_NETROM = AF_NETROM
PF_BRIDGE = AF_BRIDGE
PF_AAL5 = AF_AAL5
PF_X25 = AF_X25
PF_INET6 = AF_INET6
PF_MAX = AF_MAX
SOMAXCONN = 128
MSG_OOB = 1
MSG_PEEK = 2
MSG_DONTROUTE = 4
MSG_PROXY = 16
SOL_IP = 0
SOL_IPX = 256
SOL_AX25 = 257
SOL_ATALK = 258
SOL_NETROM = 259
SOL_TCP = 6
SOL_UDP = 17
IP_TOS = 1
IPTOS_LOWDELAY = 0x10
IPTOS_THROUGHPUT = 0x08
IPTOS_RELIABILITY = 0x04
IPTOS_MINCOST = 0x02
IP_TTL = 2
IP_HDRINCL = 3
IP_OPTIONS = 4
IP_MULTICAST_IF = 32
IP_MULTICAST_TTL = 33
IP_MULTICAST_LOOP = 34
IP_ADD_MEMBERSHIP = 35
IP_DROP_MEMBERSHIP = 36
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MAX_MEMBERSHIPS = 20
IPX_TYPE = 1
TCP_NODELAY = 1
TCP_MAXSEG = 2
SOPRI_INTERACTIVE = 0
SOPRI_NORMAL = 1
SOPRI_BACKGROUND = 2
# Included from sys/types.h
# Included from linux/types.h
# Included from linux/posix_types.h
__FD_SETSIZE = 1024
def __FDELT(d): return ((d) / __NFDBITS)
# Included from asm/posix_types.h
def __FD_ZERO(fdsetp): return \
# Included from asm/types.h
# Included from sys/bitypes.h
# Included from pthread/mit/posix.h
# Included from pthread/mit/types.h
# Included from pthread/mit/xtypes.h
# Included from pthread/mit/sys/types.h
| gpl-2.0 |
Yukarumya/Yukarum-Redfoxes | python/pytest/_pytest/pastebin.py | 181 | 3483 | """ submit failure or test session information to a pastebin service. """
import pytest
import sys
import tempfile
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting")
group._addoption('--pastebin', metavar="mode",
action='store', dest="pastebin", default=None,
choices=['failed', 'all'],
help="send failed|all info to bpaste.net pastebin service.")
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
import py
if config.option.pastebin == "all":
tr = config.pluginmanager.getplugin('terminalreporter')
# if no terminal reporter plugin is present, nothing we can do here;
# this can happen when this function executes in a slave node
# when using pytest-xdist, for example
if tr is not None:
# pastebin file will be utf-8 encoded binary file
config._pastebinfile = tempfile.TemporaryFile('w+b')
oldwrite = tr._tw.write
def tee_write(s, **kwargs):
oldwrite(s, **kwargs)
if py.builtin._istext(s):
s = s.encode('utf-8')
config._pastebinfile.write(s)
tr._tw.write = tee_write
def pytest_unconfigure(config):
if hasattr(config, '_pastebinfile'):
# get terminal contents and delete file
config._pastebinfile.seek(0)
sessionlog = config._pastebinfile.read()
config._pastebinfile.close()
del config._pastebinfile
# undo our patching in the terminal reporter
tr = config.pluginmanager.getplugin('terminalreporter')
del tr._tw.__dict__['write']
# write summary
tr.write_sep("=", "Sending information to Paste Service")
pastebinurl = create_new_paste(sessionlog)
tr.write_line("pastebin session-log: %s\n" % pastebinurl)
def create_new_paste(contents):
"""
Creates a new paste using bpaste.net service.
:contents: paste contents as utf-8 encoded bytes
:returns: url to the pasted contents
"""
import re
if sys.version_info < (3, 0):
from urllib import urlopen, urlencode
else:
from urllib.request import urlopen
from urllib.parse import urlencode
params = {
'code': contents,
'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
'expiry': '1week',
}
url = 'https://bpaste.net'
response = urlopen(url, data=urlencode(params).encode('ascii')).read()
m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
if m:
return '%s/show/%s' % (url, m.group(1))
else:
return 'bad response: ' + response
def pytest_terminal_summary(terminalreporter):
import _pytest.config
if terminalreporter.config.option.pastebin != "failed":
return
tr = terminalreporter
if 'failed' in tr.stats:
terminalreporter.write_sep("=", "Sending information to Paste Service")
for rep in terminalreporter.stats.get('failed'):
try:
msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
except AttributeError:
msg = tr._getfailureheadline(rep)
tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
rep.toterminal(tw)
s = tw.stringio.getvalue()
assert len(s)
pastebinurl = create_new_paste(s)
tr.write_line("%s --> %s" %(msg, pastebinurl))
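# Hedged usage sketch: with this plugin active, the pastebin mode is chosen
# on the command line via the option registered in pytest_addoption above:
#
#   pytest --pastebin=failed   # paste only the failure representations
#   pytest --pastebin=all      # tee the whole terminal session to the paste service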
| mpl-2.0 |
lokeshjindal15/pd-gem5 | src/arch/x86/isa/insts/x87/transcendental_functions/__init__.py | 91 | 2337 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
categories = ["trigonometric_functions",
"logarithmic_functions"]
microcode = '''
# X86 microcode
'''
for category in categories:
exec "import %s as cat" % category
microcode += cat.microcode
| bsd-3-clause |
richardnpaul/FWL-Website | lib/python2.7/site-packages/pip/vendor/html5lib/trie/datrie.py | 80 | 1177 | from __future__ import absolute_import, division, unicode_literals
from datrie import Trie as DATrie
from pip.vendor.six import text_type
from ._base import Trie as ABCTrie
class Trie(ABCTrie):
def __init__(self, data):
chars = set()
for key in data.keys():
if not isinstance(key, text_type):
raise TypeError("All keys must be strings")
for char in key:
chars.add(char)
self._data = DATrie("".join(chars))
for key, value in data.items():
self._data[key] = value
def __contains__(self, key):
return key in self._data
def __len__(self):
return len(self._data)
def __iter__(self):
raise NotImplementedError()
def __getitem__(self, key):
return self._data[key]
def keys(self, prefix=None):
return self._data.keys(prefix)
def has_keys_with_prefix(self, prefix):
return self._data.has_keys_with_prefix(prefix)
def longest_prefix(self, prefix):
return self._data.longest_prefix(prefix)
def longest_prefix_item(self, prefix):
return self._data.longest_prefix_item(prefix)
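# Illustrative sketch of the wrapper above (requires the `datrie` package;
# keys and values are made up):
#
#   trie = Trie({u"foo": 0, u"foob": 1, u"food": 2})
#   assert u"foo" in trie
#   assert sorted(trie.keys(u"foo")) == [u"foo", u"foob", u"food"]
#   assert trie.longest_prefix(u"foobar") == u"foob"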
| gpl-3.0 |
MMKrell/pyspace | pySPACE/resources/dataset_defs/time_series.py | 2 | 39469 | """ Load and store data of the type :mod:`pySPACE.resources.data_types.time_series` """
import itertools
import copy
import os
import cPickle
import sys
import scipy
import yaml
import csv
import numpy
import logging
import warnings
import glob
from pySPACE.missions.support.WindowerInterface import AbstractStreamReader
from pySPACE.missions.support.windower import MarkerWindower
from pySPACE.tools.filesystem import get_author
from pySPACE.resources.dataset_defs.base import BaseDataset
class TimeSeriesDataset(BaseDataset):
""" Loading and storing a time series dataset
This class encapsulates the most relevant code for dealing with time series
datasets, most importantly for loading and storing them to the file system.
These datasets consist of
:mod:`~pySPACE.resources.data_types.time_series` objects.
They can be loaded with a
:mod:`~pySPACE.missions.nodes.source.time_series_source` and saved with a
:mod:`~pySPACE.missions.nodes.sink.time_series_sink` node
in a :class:`~pySPACE.missions.operations.node_chain.NodeChainOperation`.
The standard *storage_format* is 'pickle',
but it is also possible to load Matlab format ('mat') or
BrainComputerInterface-competition data. For that, ``storage_format`` has
to be set in the format **bci_comp_[competition number]_[dataset number]**
in the metadata.yaml file. For example, **bci_comp_2_4** means loading of
time series from BCI Competition II (2003), dataset IV.
Currently, the following datasets can be loaded:
- BCI Competition II, dataset IV: self-paced key typing (left vs. right)
- BCI Competition III, dataset II: P300 speller paradigm, training data
See http://www.bbci.de/competition/ for further information.
Other formats are currently supported for saving the data, but not
yet for loading it.
This issue can be handled by processing the data with a node chain
operation which transforms the data into feature vectors and
using the respective storing and loading functionality, e.g., with csv and
arff files.
There is also a node for transforming feature vectors back to
TimeSeries objects.
**Parameters**
:dataset_md:
A dictionary with all the meta data.
(*optional, default: None*)
:sort_string:
A lambda function string that is evaluated before the data is stored.
(*optional, default: None*)
**Known issues**
The BCI Competition III dataset II should actually be loaded as a
streaming dataset to enable different windowing strategies.
Segment ends (i.e., where a new letter starts) can be coded as markers.
"""
def __init__(self, dataset_md=None, sort_string=None, **kwargs):
super(TimeSeriesDataset, self).__init__(dataset_md=dataset_md)
self.stream_mode = False
if dataset_md is not None:
dataset_dir = self.meta_data["dataset_directory"]
s_format = self.meta_data["storage_format"]
if type(s_format) == list:
f_format = s_format[0]
else:
f_format = s_format
# Loading depends on whether data is split into
# training and test data, whether different splits exist and whether
# several runs have been conducted.
if f_format in ["pickle", "mat"] and not self.meta_data["train_test"] \
and self.meta_data["splits"] == 1 \
and self.meta_data["runs"] == 1:
# The dataset consists only of a single set of data, for
# one run, one splitting, and only test data
data = dataset_md["data_pattern"].replace("_run", "_run0") \
.replace("_sp", "_sp0") \
.replace("_tt", "_test")
# File that contains the time series objects
ts_file = os.path.join(dataset_dir, data)
# Current data will be loaded lazily
self.data[(0, 0, "test")] = ts_file
elif f_format in ["pickle", "mat"]:
for run_nr in range(self.meta_data["runs"]):
for split_nr in range(self.meta_data["splits"]):
for train_test in ["train", "test"]:
# The collection contains one set of data per
# run/split/train-test combination;
# load each of them here
data = dataset_md["data_pattern"]\
.replace("_run", "_run%s" % run_nr) \
.replace("_sp", "_sp%s" % split_nr) \
.replace("_tt", "_%s" % train_test)
# File that contains the time series objects
ts_file = os.path.join(dataset_dir,data)
# Actual data will be loaded lazily
self.data[(run_nr, split_nr, train_test)] = ts_file
elif f_format.startswith("bci_comp"):
# get bci competion and dataset number
try:
self.comp_number, self.comp_set = f_format.split('_')[2:]
except Exception:
raise Exception("%s --- Could not extract BCI competition"
" and dataset number!" % f_format)
if self.comp_number == "2":
if self.comp_set == "4":
def _update_sf(self, file_name):
if '1000' in file_name:
self.sf = 1000
else:
self.sf = 100
self.meta_data["sampling_frequency"] = self.sf
# structure: 2 mat file with data in different sampling
# frequencies; txt file for test labels
if "sampling_frequency" in self.meta_data.keys() and \
"file_name" in self.meta_data.keys():
# are they inconsistent?
self.sf = self.meta_data["sampling_frequency"]
if (self.sf == 100 and '1000' in \
self.meta_data["file_name"]) or \
(self.sf == 1000 and '1000' not in \
self.meta_data["file_name"]):
warnings.warn("File name does not match "
"sampling frequency or vice versa. %s "
"is loaded." % self.meta_data["file_name"])
_update_sf(self, self.meta_data["file_name"])
ts_file = os.path.join(dataset_dir,
self.meta_data["file_name"])
elif "file_name" in self.meta_data.keys():
_update_sf(self, self.meta_data["file_name"])
ts_file = os.path.join(dataset_dir,
self.meta_data["file_name"])
elif "sampling_frequency" in self.meta_data.keys():
self.sf = self.meta_data["sampling_frequency"]
if self.sf == 1000:
ts_file = os.path.join(dataset_dir,
"sp1s_aa_1000Hz.mat")
else:
ts_file = os.path.join(dataset_dir,
"sp1s_aa.mat")
else:
ts_file = glob.glob(os.path.join(dataset_dir,
"*.mat"))[0]
warnings.warn("Either file name nor sampling "
"frequency is given. %s is loaded." % ts_file)
_update_sf(self, ts_file)
self.data[(0, 0, "test")] = ts_file
self.data[(0, 0, "train")] = ts_file
else:
raise NotImplementedError("Loading of BCI competition" \
" %s, dataset %s not supported " \
% (self.comp_number, self.comp_set))
elif self.comp_number == "3":
if self.comp_set == "2":
# structure: mat file for train and test data
# TODO: loading test labels is not possible at the moment!
# glob.glob(os.path.join(dataset_dir,"*Test.mat"))[0]
#self.data[(0, 0, "train")] = \
self.data[(0, 0, "test")] = \
glob.glob(os.path.join(dataset_dir,"*Train.mat"))[0]
else:
raise NotImplementedError("Loading of BCI competition" \
" %s, dataset %s not supported " \
% (self.comp_number, self.comp_set))
else:
raise NotImplementedError("Loading of BCI competition %s," \
" dataset %s not supported " \
% (self.comp_number, self.comp_set))
else: # s_format == "csv":
if "file_name" in self.meta_data.keys():
ts_file = os.path.join(dataset_dir,
self.meta_data["file_name"])
elif "data_pattern" in self.meta_data.keys():
# The collection consists only of a single set of data, for
# one run, one splitting, and only test data
data = dataset_md["data_pattern"].replace("_run", "_run0") \
.replace("_sp","_sp0") \
.replace("_tt","_test")
ts_file = os.path.join(dataset_dir,data)
elif os.path.isfile(os.path.join(dataset_dir,"data.csv")):
ts_file = os.path.join(dataset_dir,"data.csv")
else:
pathlist = glob.glob(os.path.join(dataset_dir,"*.csv"))
if len(pathlist)>1:
warnings.warn(
"To many given data sets:%s. Taking first entry."
% str(pathlist))
ts_file = pathlist[0]
elif len(pathlist) == 0:
warnings.warn("No csv file found. Trying any file.")
pathlist = glob.glob(os.path.join(dataset_dir, "*"))
ts_file = pathlist[0]
if "metadata.yaml" in ts_file:
ts_file = pathlist[1]
self.data[(0, 0, "test")] = ts_file
self.sort_string = sort_string if sort_string is not None else None
def get_data(self, run_nr, split_nr, train_test):
""" Return the train or test data for the given split in the given run.
**Parameters**
:run_nr: The number of the run whose data should be loaded.
:split_nr: The number of the split whose data should be loaded.
:train_test: "train" if the training data should be loaded.
"test" if the test data should be loaded.
"""
# Do lazy loading of the time series objects.
if isinstance(self.data[(run_nr, split_nr, train_test)], basestring):
self._log("Lazy loading of %s time series windows from input "
"collection for run %s, split %s." % (train_test, run_nr,
split_nr))
s_format = self.meta_data["storage_format"]
if type(s_format) == list:
f_format = s_format[0]
else:
f_format = s_format
if f_format == "pickle":
# Load the time series from a pickled file
f = open(self.data[(run_nr, split_nr, train_test)], 'r')
try:
self.data[(run_nr, split_nr, train_test)] = cPickle.load(f)
except ImportError:
# code for backward compatibility
# redirection of old path
f.seek(0)
self._log("Loading deprecated data. Please transfer it " +
"to new format.",level=logging.WARNING)
from pySPACE.resources.data_types import time_series
sys.modules['abri_dp.types.time_series'] = time_series
self.data[(run_nr, split_nr, train_test)] = cPickle.load(f)
del sys.modules['abri_dp.types.time_series']
f.close()
elif f_format in ["mat", "matlab", "MATLAB"]:
from scipy.io import loadmat
from pySPACE.resources.data_types.time_series import TimeSeries
ts_fname = self.data[(run_nr, split_nr, train_test)]
dataset = loadmat(ts_fname)
channel_names = [name.strip() for name in dataset['channel_names']]
sf = dataset["sampling_frequency"][0][0]
self.data[(run_nr, split_nr, train_test)] = []
# assume third axis to be trial axis
if "channelXtime" in s_format:
for i in range(dataset["data"].shape[2]):
self.data[(run_nr, split_nr, train_test)].append(\
(TimeSeries(dataset["data"][:,:,i].T, channel_names,
sf), dataset["labels"][i].strip()))
else:
for i in range(dataset["data"].shape[2]):
self.data[(run_nr, split_nr, train_test)].append(\
(TimeSeries(dataset["data"][:,:,i], channel_names,
sf), dataset["labels"][i].strip()))
elif f_format.startswith("bci_comp"):
from scipy.io import loadmat
from pySPACE.resources.data_types.time_series import TimeSeries
if self.comp_number == "2":
if self.comp_set == "4":
ts_fname = self.data[(run_nr, split_nr, train_test)]
d = loadmat(ts_fname)
channel_names = [name[0].astype('|S3') for name in \
d["clab"][0]]
if train_test == "train":
self.data[(run_nr, split_nr, train_test)] = []
input_d = d["x_train"]
input_l = d["y_train"][0]
for i in range(input_d.shape[2]):
self.data[(run_nr, split_nr,
train_test)].append(\
(TimeSeries(input_d[:,:,i],
channel_names, float(self.sf)),
"Left" if input_l[i] == 0 else "Right"))
else:
label_fname = glob.glob(os.path.join(
os.path.dirname(ts_fname),"*.txt"))[0]
input_d = d["x_test"]
input_l = open(label_fname,'r')
self.data[(run_nr, split_nr, train_test)] = []
for i in range(input_d.shape[2]):
label = int(input_l.readline())
self.data[(run_nr, split_nr,
train_test)].append(\
(TimeSeries(input_d[:,:,i],
channel_names, float(self.sf)),
"Left" if label == 0 else "Right"))
elif self.comp_number == "3":
if self.comp_set == "2":
data = loadmat(self.data[(run_nr, split_nr, train_test)])
signal = data['Signal']
flashing = data['Flashing']
stimulus_code = data['StimulusCode']
stimulus_type = data['StimulusType']
window = 240
Fs = 240
channels = 64
epochs = signal.shape[0]
self.data[(run_nr, split_nr, train_test)] = []
self.start_offset_ms = 1000.0
self.end_offset_ms = 1000.0
whole_len = (self.start_offset_ms + self.end_offset_ms)*Fs/1000.0 + window
responses = numpy.zeros((12, 15, whole_len, channels))
for epoch in range(epochs):
rowcolcnt=numpy.ones(12)
for n in range(1, signal.shape[1]):
if (flashing[epoch,n]==0 and flashing[epoch,n-1]==1):
rowcol=stimulus_code[epoch,n-1]
if n-24-self.start_offset_ms*Fs/1000.0 < 0:
temp = signal[epoch,0:n+window+self.end_offset_ms*Fs/1000.0-24,:]
temp = numpy.vstack((numpy.zeros((whole_len - temp.shape[0], temp.shape[1])), temp))
elif n+window+self.end_offset_ms*Fs/1000.0-24> signal.shape[1]:
temp = signal[epoch,n-24-self.start_offset_ms*Fs/1000.0:signal.shape[1],:]
temp = numpy.vstack((temp, numpy.zeros((whole_len-temp.shape[0], temp.shape[1]))))
else:
temp = signal[epoch, n-24-self.start_offset_ms*Fs/1000.0:n+window+self.end_offset_ms*Fs/1000.0-24, :]
responses[rowcol-1,rowcolcnt[rowcol-1]-1,:,:]=temp
rowcolcnt[rowcol-1]=rowcolcnt[rowcol-1]+1
avgresp=numpy.mean(responses,1)
targets = stimulus_code[epoch,:]*stimulus_type[epoch,:]
target_rowcol = []
for value in targets:
if value not in target_rowcol:
target_rowcol.append(value)
target_rowcol.sort()
for i in range(avgresp.shape[0]):
temp = avgresp[i,:,:]
data = TimeSeries(input_array = temp,
channel_names = range(64),
sampling_frequency = window)
if i == target_rowcol[1]-1 or i == target_rowcol[2]-1:
self.data[(run_nr, split_nr, train_test)].append((data,"Target"))
else:
self.data[(run_nr, split_nr, train_test)].append((data,"Standard"))
if self.stream_mode and not self.data[(run_nr, split_nr, train_test)] == []:
# Create a connection to the TimeSeriesClient and return an iterator
# that passes all received data through the windower.
self.reader = TimeSeriesClient(self.data[(run_nr, split_nr, train_test)], blocksize=100)
# Creates a windower that splits the training data into windows
# based in the window definitions provided
# and assigns correct labels to these windows
self.reader.set_window_defs(self.window_definition)
self.reader.connect()
self.marker_windower = MarkerWindower(
self.reader, self.window_definition,
nullmarker_stride_ms=self.nullmarker_stride_ms,
no_overlap=self.no_overlap,
data_consistency_check=self.data_consistency_check)
return self.marker_windower
else:
return self.data[(run_nr, split_nr, train_test)]
def store(self, result_dir, s_format="pickle"):
""" Stores this collection in the directory *result_dir*.
In contrast to *dump* this method stores the collection
not in a single file but as a whole directory structure with meta
information etc. The data sets are stored separately for each run,
split, train/test combination.
**Parameters**
:result_dir:
The directory in which the collection will be stored.
:name:
The prefix of the file names in which the individual data sets are
stored. The actual file names are determined by appending suffixes
that encode run, split, train/test information.
(*optional, default: "time_series"*)
:s_format:
The format in which the actual data sets should be stored.
Possible formats are 'pickle', 'text', 'csv' and 'mat' (matlab)
format. If s_format is a list, the second element further
specifies additional options for storing.
- pickle:
Standard Python format
- text:
In the text format, all time series objects are concatenated
to a single large table containing only integer values.
- csv:
For the csv format comma separated values are taken as default
or a specified Python format string.
- mat:
Scipy's savemat function is used for storing. Thereby the data
is stored as 3 dimensional array. Also meta data information,
like sampling frequency and channel names are saved.
As an additional parameter the orientation of the data arrays
can be given as 'channelXtime' or 'timeXchannel'
.. note:: For the text and MATLAB format, markers could be added
by using a Marker_To_Mux node before
(*optional, default: "pickle"*)
.. todo:: Put markers at the right time points and also write a marker channel.
.. todo:: Shouldn't the 'text' and 'csv' formats be part of the stream
dataset?!
"""
name = "time_series"
# for some storage procedures we need further specifications
s_type = None
if type(s_format) == list:
# file format is first position
f_format = s_format[0]
if len(s_format) > 1:
s_type = s_format[1]
else:
f_format = s_format
if f_format == "text" and s_type is None:
s_type = "%i"
elif f_format == "csv" and s_type == "real":
s_type = "%.18e"
# Update the meta data
author = get_author()
self.update_meta_data({"type": "time_series",
"storage_format": s_format,
"author": author,
"data_pattern": "data_run" + os.sep
+ name + "_sp_tt." + f_format})
# Iterate through splits and runs in this dataset
for key, time_series in self.data.iteritems():
# load data, if necessary
# (due to the lazy loading, the data might be not loaded already)
if isinstance(time_series, basestring):
time_series = self.get_data(key[0], key[1], key[2])
if self.sort_string is not None:
time_series.sort(key=eval(self.sort_string))
# Construct result directory
result_path = result_dir + os.sep + "data" + "_run%s" % key[0]
if not os.path.exists(result_path):
os.mkdir(result_path)
key_str = "_sp%s_%s" % key[1:]
# Store data depending on the desired format
if f_format in ["pickle", "cpickle", "cPickle"]:
result_file = open(os.path.join(result_path,
name+key_str+".pickle"), "w")
cPickle.dump(time_series, result_file, cPickle.HIGHEST_PROTOCOL)
result_file.close()
elif f_format in ["text","csv"]:
self.update_meta_data({
"type": "stream",
"marker_column": "marker"})
result_file = open(os.path.join(result_path,
name + key_str + ".csv"), "w")
csvwriter = csv.writer(result_file)
channel_names = copy.deepcopy(time_series[0][0].channel_names)
if f_format == "csv":
channel_names.append("marker")
csvwriter.writerow(channel_names)
for (data, key) in time_series:
if f_format == "text":
numpy.savetxt(result_file, data, delimiter=",", fmt=s_type)
if not key is None:
result_file.write(str(key))
result_file.flush()
elif data.marker_name is not None \
and len(data.marker_name) > 0:
result_file.write(str(data.marker_name))
result_file.flush()
else:
first_line = True
marker = ""
if not key is None:
marker = str(key)
elif data.marker_name is not None \
and len(data.marker_name) > 0:
marker = str(data.marker_name)
for line in data:
l = list(line)
l.append(marker)
csvwriter.writerow(list(l))
if first_line:
first_line = False
marker = ""
result_file.flush()
result_file.close()
elif f_format in ["matlab", "mat", "MATLAB"]:
# todo: handle all the other attributes of ts objects!
import scipy.io
result_file_name = os.path.join(result_path,
name + key_str + ".mat")
# extract a first time series object to get meta data
ts1 = time_series[0][0]
# collect all important information in the collection_object
dataset_dict = {
"sampling_frequency": ts1.sampling_frequency,
"channel_names": ts1.channel_names}
# we have to extract the data and labels separately
if 'channelXtime' in s_format:
dataset_dict["data"] = [data.T for data, _ in time_series]
else:
dataset_dict["data"] = [data for data, _ in time_series]
dataset_dict["labels"] = [label for _, label in time_series]
# construct numpy 3d array (e.g., channelXtimeXtrials)
dataset_dict["data"] = numpy.rollaxis(numpy.array(
dataset_dict["data"]), 0, 3)
scipy.io.savemat(result_file_name, mdict=dataset_dict)
elif f_format in ["bp_eeg"]:
result_file = open(os.path.join(result_path,
name + key_str + ".eeg"),"a+")
result_file_mrk = open(os.path.join(result_path,
name + key_str + ".vmrk"),"w")
result_file_mrk.write("Brain Vision Data Exchange Marker File, "
"Version 1.0\n")
result_file_mrk.write("; Data stored by pySPACE\n")
result_file_mrk.write("[Common Infos]\n")
result_file_mrk.write("Codepage=UTF-8\n")
result_file_mrk.write("DataFile=%s\n" %
str(name + key_str + ".eeg"))
result_file_mrk.write("\n[Marker Infos]\n")
markerno = 1
datapoint = 1
sf = None
channel_names = None
for t in time_series:
if sf is None:
sf = t[0].sampling_frequency
if channel_names is None:
channel_names = t[0].get_channel_names()
for mrk in t[0].marker_name.keys():
for tm in t[0].marker_name[mrk]:
result_file_mrk.write(str("Mk%d=Stimulus,%s,%d,1,0\n" %
(markerno, mrk, datapoint+(tm*sf/1000.0))))
markerno += 1
data_ = t[0].astype(numpy.int16)
data_.tofile(result_file)
datapoint += data_.shape[0]
result_hdr = open(os.path.join(result_path,
name + key_str + ".vhdr"),"w")
result_hdr.write("Brain Vision Data Exchange Header "
"File Version 1.0\n")
result_hdr.write("; Data stored by pySPACE\n\n")
result_hdr.write("[Common Infos]\n")
result_hdr.write("Codepage=UTF-8\n")
result_hdr.write("DataFile=%s\n" %
str(name + key_str + ".eeg"))
result_hdr.write("MarkerFile=%s\n" %
str(name + key_str + ".vmrk"))
result_hdr.write("DataFormat=BINARY\n")
result_hdr.write("DataOrientation=MULTIPLEXED\n")
result_hdr.write("NumberOfChannels=%d\n" % len(channel_names))
result_hdr.write("SamplingInterval=%d\n\n" % (1000000/sf))
result_hdr.write("[Binary Infos]\n")
result_hdr.write("BinaryFormat=INT_16\n\n")
result_hdr.write("[Channel Infos]\n")
# TODO: Add Resolutions to time_series
# 0 = 0.1 [micro]V,
# 1 = 0.5 [micro]V,
# 2 = 10 [micro]V,
# 3 = 152.6 [micro]V (seems to be unused!)
resolutions_str = [unicode("0.1,%sV" % unicode(u"\u03BC")),
unicode("0.5,%sV" % unicode(u"\u03BC")),
unicode("10,%sV" % unicode(u"\u03BC")),
unicode("152.6,%sV" % unicode(u"\u03BC"))]
for i in range(len(channel_names)):
result_hdr.write(unicode("Ch%d=%s,,%s\n" %
(i+1,channel_names[i],
unicode(resolutions_str[0]))).encode('utf-8'))
result_file.close()
else:
raise NotImplementedError("Using unavailable storage format: %s!"
% f_format)
self.update_meta_data({
"channel_names": copy.deepcopy(time_series[0][0].channel_names),
"sampling_frequency": time_series[0][0].sampling_frequency
})
#Store meta data
BaseDataset.store_meta_data(result_dir, self.meta_data)
def set_window_defs(self, window_definition, nullmarker_stride_ms=1000,
no_overlap=False, data_consistency_check=False):
"""Code copied from StreamDataset for rewindowing data"""
self.window_definition = window_definition
self.nullmarker_stride_ms = nullmarker_stride_ms
self.no_overlap = no_overlap
self.data_consistency_check = data_consistency_check
self.stream_mode = True
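# Hedged end-to-end sketch (metadata and paths are hypothetical): lazily load
# one split of a collection, then write it back out in MATLAB format.
#
#   dataset = TimeSeriesDataset(dataset_md=meta_data_dict)
#   windows = dataset.get_data(run_nr=0, split_nr=0, train_test="test")
#   dataset.store("/tmp/ts_out", s_format=["mat", "channelXtime"])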
class TimeSeriesClient(AbstractStreamReader):
"""TimeSeries stream client for TimeSeries"""
def __init__(self, ts_stream, **kwargs):
self.callbacks = list()
self._markerids= {"null":0} # default marker
self._markerNames = {0:"null"} # dictionary with marker names
self.nmarkertypes = len(self.markerids) # number of different markers
self.nChannels = None
self._stdblocksize = None
self._dSamplingInterval = None
self.ts_stream = ts_stream
self.blockcounter = 0
# create two different iterators,
# one for data reading, the other for
# peeking etc
(self.ts_stream_iter,self.backup_iter) = itertools.tee(iter(ts_stream))
@property
def dSamplingInterval(self):
return self._dSamplingInterval
@property
def stdblocksize(self):
return self._stdblocksize
@property
def markerids(self):
return self._markerids
@property
def channelNames(self):
return self._channelNames
@property
def markerNames(self):
return self._markerNames
def regcallback(self, func):
"""Register callback function"""
self.callbacks.append(func)
def connect(self):
"""connect and initialize client"""
try:
self._initialize(self.backup_iter.next())
except StopIteration as e:
print("timeseriesclient got no data: %s" % e)
# if a StopIteration is caught right here, there
# is no data contained for the current modality (train/test)
# in this datastream.
pass
def set_window_defs(self, window_definitions):
""" Set all markers at which the windows are cut"""
# extract start and endmarker
marker_id_index = self.nmarkertypes
self._markerNames[marker_id_index] = window_definitions[0].startmarker
self._markerids[window_definitions[0].startmarker] = marker_id_index
marker_id_index += 1
self._markerNames[marker_id_index] = window_definitions[0].endmarker
self._markerids[window_definitions[0].endmarker] = marker_id_index
marker_id_index += 1
# extract all other markers
for wdef in window_definitions:
if not self.markerids.has_key(wdef.markername):
self._markerNames[marker_id_index] = wdef.markername
self._markerids[wdef.markername] = marker_id_index
marker_id_index += 1
# Exclude definitions marker
for edef in wdef.excludedefs:
if not self.markerids.has_key(edef.markername):
self._markerNames[marker_id_index] = edef.markername
self._markerids[edef.markername] = marker_id_index
marker_id_index += 1
# Include definitions marker
for idef in wdef.includedefs:
if not self.markerids.has_key(idef.markername):
self._markerNames[marker_id_index] = idef.markername
self._markerids[idef.markername] = marker_id_index
marker_id_index += 1
self.nmarkertypes = len(self.markerNames.keys())
def _initialize(self,item):
# get data part from (data,label) combination
block = item[0]
self.nChannels = block.shape[1]
self._stdblocksize = block.shape[0]
self._dSamplingInterval = block.sampling_frequency
self._channelNames = block.channel_names
def read(self, nblocks=1, verbose=False):
"""Invoke registered callbacks for each incoming data block
returns number of read _data_ blocks"""
ret = 0
nread = 0
while ret != -1 and \
ret is not None and \
(nblocks == -1 or nread < nblocks):
ret = self._readmsg(verbose=False)
if ret is not None:
for f in self.callbacks:
f(self.ndsamples, self.ndmarkers)
nread += 1
return nread
def _readmsg(self, msg_type='all', verbose=False):
""" Read time series object from given iterator
"""
# the iter items are a combination of data and
# dummy label -> extract data
block = self.ts_stream_iter.next()[0]
self.blockcounter += 1
if block is None:
return None
# no data message read until now
# -> initialize property values
if self.nChannels is None:
self._initialize((block, None))  # _initialize expects a (data, label) pair
self.ndmarkers = numpy.zeros([self.stdblocksize], int)
self.ndmarkers.fill(-1)
if block.shape[0] < self.stdblocksize:
return 1
for (marker, positions) in block.marker_name.iteritems():
for position_as_ms in positions:
# position_as_samples = numpy.floor(position_as_ms / 1000.0 *
# self.dSamplingInterval)
position_as_samples = numpy.int(position_as_ms / 1000.0 *
self.dSamplingInterval)
# found a new marker, add it to marker name buffer
if marker == -1 or not self.markerids.has_key(marker):
self.nmarkertypes += 1
self.markerNames[self.nmarkertypes] = marker
self.markerids[marker] = self.nmarkertypes
if not self.markerids.has_key(marker):
continue
markerid = self.markerids[marker]
if self.ndmarkers[position_as_samples] == -1:
self.ndmarkers[position_as_samples] = markerid
elif position_as_samples < self.stdblocksize:
self.ndmarkers[position_as_samples] = markerid
else:
self.lostmarker = True
self.lostmarkertypedesc = markerid
self.readSize = (self.nChannels * self.stdblocksize)
self.ndsamples = numpy.array(block)
self.ndsamples.shape = (self.stdblocksize, self.nChannels)
self.ndsamples = scipy.transpose(self.ndsamples)
return 1
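# Hedged usage sketch for the streaming client above; `windows` would be a
# list of (TimeSeries, label) pairs and `on_block` a user-supplied callback:
#
#   client = TimeSeriesClient(windows)
#   client.connect()
#   client.regcallback(on_block)
#   client.read(nblocks=-1)  # drain the stream, firing the callback per block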
| gpl-3.0 |
olga-perederieieva/pyDEA | tests/test_main.py | 2 | 1640 | import os
import shutil
from pyDEA.main import main
from pyDEA.core.data_processing.parameters import parse_parameters_from_file
from pyDEA.core.utils.dea_utils import auto_name_if_needed
def test_main_correct_params():
filename = 'tests/params_new_format.txt'
params = parse_parameters_from_file(filename)
auto_name = auto_name_if_needed(params, 'xlsx')
main(filename)
assert os.path.exists(auto_name) is True
os.remove(auto_name)
def test_main_output_format():
filename = 'tests/params_new_format.txt'
params = parse_parameters_from_file(filename)
auto_name = auto_name_if_needed(params, 'xls')
main(filename, output_format='xls')
assert os.path.exists(auto_name) is True
os.remove(auto_name)
def test_main_output_dir():
filename = 'tests/params_new_format.txt'
params = parse_parameters_from_file(filename)
auto_name = auto_name_if_needed(params, 'xls')
output_dir = 'tests'
main(filename, output_format='xls', output_dir=output_dir)
output_name = os.path.join(output_dir, auto_name)
assert os.path.exists(output_name) is True
os.remove(output_name)
def test_main_different_sheet():
filename = 'tests/params_to_test_main.txt'
main(filename, output_format='', sheet_name_usr='Sheet2')
assert os.path.exists('haha') is True
shutil.rmtree('haha')
def test_main_csv():
filename = 'tests/params_to_test_main_csv.txt'
params = parse_parameters_from_file(filename)
auto_name = auto_name_if_needed(params, 'xlsx')
main(filename, sheet_name_usr='haha')
assert os.path.exists(auto_name) is True
os.remove(auto_name)
| mit |
bzbarsky/servo | tests/wpt/web-platform-tests/tools/six/documentation/conf.py | 420 | 7015 | # -*- coding: utf-8 -*-
#
# six documentation build configuration file
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"six"
copyright = u"2010-2014, Benjamin Peterson"
sys.path.append(os.path.abspath(os.path.join(".", "..")))
from six import __version__ as six_version
sys.path.pop()
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = six_version[:-2]
# The full version, including alpha/beta/rc tags.
release = six_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sixdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "six.tex", u"six Documentation",
u"Benjamin Peterson", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "six", u"six Documentation",
[u"Benjamin Peterson"], 1)
]
# -- Intersphinx ---------------------------------------------------------------
intersphinx_mapping = {"py2" : ("https://docs.python.org/2/", None),
"py3" : ("https://docs.python.org/3/", None)}
| mpl-2.0 |
burdell/CS4464-Final-Project | django/contrib/localflavor/il/forms.py | 317 | 2192 | """
Israeli-specific form helpers
"""
import re
from django.core.exceptions import ValidationError
from django.core.validators import EMPTY_VALUES
from django.forms.fields import RegexField, Field, EMPTY_VALUES
from django.utils.checksums import luhn
from django.utils.translation import ugettext_lazy as _
# Israeli ID numbers consist of up to 8 digits followed by a checksum digit.
# Numbers which are shorter than 8 digits are effectively left-zero-padded.
# The checksum digit is occasionally separated from the number by a hyphen,
# and is calculated using the luhn algorithm.
#
# Relevant references:
#
# (hebrew) http://he.wikipedia.org/wiki/%D7%9E%D7%A1%D7%A4%D7%A8_%D7%96%D7%94%D7%95%D7%AA_(%D7%99%D7%A9%D7%A8%D7%90%D7%9C)
# (hebrew) http://he.wikipedia.org/wiki/%D7%A1%D7%A4%D7%A8%D7%AA_%D7%91%D7%99%D7%A7%D7%95%D7%A8%D7%AA
id_number_re = re.compile(r'^(?P<number>\d{1,8})-?(?P<check>\d)$')
class ILPostalCodeField(RegexField):
"""
A form field that validates its input as an Israeli postal code.
Valid form is XXXXX where X represents integer.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XXXXX'),
}
def __init__(self, *args, **kwargs):
super(ILPostalCodeField, self).__init__(r'^\d{5}$', *args, **kwargs)
def clean(self, value):
if value not in EMPTY_VALUES:
value = value.replace(" ", "")
return super(ILPostalCodeField, self).clean(value)
class ILIDNumberField(Field):
"""
A form field that validates its input as an Israeli identification number.
Valid form is per the Israeli ID specification.
"""
default_error_messages = {
'invalid': _(u'Enter a valid ID number.'),
}
def clean(self, value):
value = super(ILIDNumberField, self).clean(value)
if value in EMPTY_VALUES:
return u''
match = id_number_re.match(value)
if not match:
raise ValidationError(self.error_messages['invalid'])
value = match.group('number') + match.group('check')
if not luhn(value):
raise ValidationError(self.error_messages['invalid'])
return value
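# Illustrative sketch of the validation flow above (the ID value is made up
# and is not claimed to be checksum-valid):
#
#   field = ILIDNumberField()
#   match = id_number_re.match(u'3933742-3')
#   # -> groups: number='3933742', check='3'; luhn('39337423') then decides
#   #    whether clean() returns the value or raises ValidationError.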
| bsd-3-clause |
wndias/bc.repository | script.module.urlresolver/lib/urlresolver/plugins/watchvideo.py | 4 | 2141 | '''
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re
from lib import jsunpack
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class WatchVideoResolver(UrlResolver):
name = "watchvideo.us"
domains = ["watchvideo.us", "watchvideo4.us"]
pattern = '(?://|\.)(watchvideo[0-9]?\.us)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
html = self.net.http_GET(web_url).content
if html.find('404 Not Found') >= 0:
raise ResolverError('File Removed')
if html.find('Video is processing') >= 0:
raise ResolverError('File still being processed')
packed = re.search('(eval\(function.*?)\s*</script>', html, re.DOTALL)
if packed:
js = jsunpack.unpack(packed.group(1))
else:
js = html
link = re.search('(?:m3u8").*?"(.*?)"', js)
if link:
common.log_utils.log_debug('watchvideo.us Link Found: %s' % link.group(1))
return link.group(1)
raise ResolverError('Unable to find watchvideo.us video')
def get_url(self, host, media_id):
return 'http://%s/%s.html' % (host, media_id)
def get_host_and_id(self, url):
r = re.search(self.pattern, url)
if r:
return r.groups()
else:
return False
def valid_url(self, url, host):
return re.search(self.pattern, url) or self.name in host
| gpl-2.0 |
Micronaet/micronaet-contract | base_import_fraternita/partner.py | 1 | 2667 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP module
# Copyright (C) 2010 Micronaet srl (<http://www.micronaet.it>)
#
# Italian OpenERP Community (<http://www.openerp-italia.com>)
#
#############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields
class res_partner_extra_fields(osv.osv):
_inherit ='res.partner'
_columns = {
'extra_phone': fields.text('Extra phone'),
'is_employee': fields.boolean('Is employee', required=False),
}
res_partner_extra_fields()
class res_partner_address_extra_fields(osv.osv):
_inherit ='res.partner.address'
_columns = {
'extra_phone': fields.text('Extra phone'),
}
res_partner_address_extra_fields()
class hr_employee_extra_fields(osv.osv):
_inherit ='hr.employee'
_columns = {
'import':fields.boolean('Import', required=False),
'birth_place': fields.char('Birth place', size=80, required=False, readonly=False),
# sector
# state
# TODO: add the fields to the view!!
'date_recruitment': fields.date('Date recruitment'), # hiring date (data_assunzione)
'date_retired': fields.date('Date retired'), # resignation date
'curricula': fields.text('Curricula'),
'patent_type': fields.char('Patent type', size=5, required=False, readonly=False), # driving licence (patente)
# job role
# working hours
# contract type
'date_end_contract': fields.date('End contract'), # contract end date
# > department_id (exists)
# > contract_ids (exists)
}
hr_employee_extra_fields()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
juju/juju-tosca | resources/juju/metadata.py | 1 | 2213 | #!/usr/bin/python
#
# Copyright 2014 IBM Corporation
# Zhaizhixiang (zhzxzhai@cn.ibm.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from resources.resource import Resource
class Metadata(Resource):
'''
Represents the metadata resource of a Juju charm (name, summary,
relations, etc., per the SCHEMA keys below).
'''
SCHEMA = (NAME, SUMMARY, DESCRIPTION, MAINTAINER,
CATEGORIES, SUBORDINATE, PROVIDES, REQUIRES, PEERS) = \
('name', 'summary', 'description', 'maintainer',
'categories', 'subordinate', 'provides', 'requires', 'peers')
CATEGORIES_APPLICATIONS = 'applications'
CATEGORIES_APP_SERVERS = 'app-servers'
CATEGORIES_CACHE_PROXY = 'cache-proxy'
CATEGORIES_DATABASES = 'databases'
CATEGORIES_FILE_SERVERS = 'file-servers'
CATEGORIES_MISC = 'misc'
def add_provide(self, name, interface):
if self.get_item(self.PROVIDES) is None:
self.set_item(self.PROVIDES, {})
provides = self.get_item(self.PROVIDES)
provides[name] = interface
def add_peer(self, name, interface):
if self.get_item(self.PEERS) is None:
self.set_item(self.PEERS, {})
peers = self.get_item(self.PEERS)
peers[name] = interface
def add_require(self, name, interface):
if self.get_item(self.REQUIRES) is None:
self.set_item(self.REQUIRES, {})
requires = self.get_item(self.REQUIRES)
requires[name] = interface
class Interface(Resource):
SCHEMA = (INTERFACE, LIMIT, SCOPE, OPTIONAL) = \
("interface", "limit", "scope", "optional")
SCOPE_GLOBAL = "global"
SCOPE_CONTAINER = "container"
| agpl-3.0 |
ykim362/mxnet | example/reinforcement-learning/a3c/sym.py | 53 | 1903 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import mxnet as mx
def get_symbol_atari(act_dim):
net = mx.symbol.Variable('data')
net = mx.symbol.Cast(data=net, dtype='float32')
net = mx.symbol.Convolution(data=net, name='conv1', kernel=(8, 8), stride=(4, 4), num_filter=16)
net = mx.symbol.Activation(data=net, name='relu1', act_type="relu")
net = mx.symbol.Convolution(data=net, name='conv2', kernel=(4, 4), stride=(2, 2), num_filter=32)
net = mx.symbol.Activation(data=net, name='relu2', act_type="relu")
net = mx.symbol.Flatten(data=net)
net = mx.symbol.FullyConnected(data=net, name='fc4', num_hidden=256)
net = mx.symbol.Activation(data=net, name='relu4', act_type="relu")
fc_policy = mx.symbol.FullyConnected(data=net, name='fc_policy', num_hidden=act_dim)
policy = mx.symbol.SoftmaxOutput(data=fc_policy, name='policy', out_grad=True)
entropy = mx.symbol.SoftmaxActivation(data=fc_policy, name='entropy')
value = mx.symbol.FullyConnected(data=net, name='fc_value', num_hidden=1)
value = mx.symbol.LinearRegressionOutput(data=value, name='value')
return mx.symbol.Group([policy, entropy, value])
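# Hedged usage sketch: build the symbol for a 6-action environment and inspect
# its grouped outputs (output names follow from the layer names above):
#
#   net = get_symbol_atari(act_dim=6)
#   print(net.list_outputs())
#   # -> ['policy_output', 'entropy_output', 'value_output']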
| apache-2.0 |
projetoquixique/quixique | node_modules/node-gyp/gyp/tools/graphviz.py | 2679 | 2878 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""
import collections
import json
import sys
def ParseTarget(target):
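  """Split a gyp target string of the form 'build_file:target#toolset' into its parts."""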
target, _, suffix = target.partition('#')
filename, _, target = target.partition(':')
return filename, target, suffix
def LoadEdges(filename, targets):
"""Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their dependents."""
  file = open(filename)
edges = json.load(file)
file.close()
# Copy out only the edges we're interested in from the full edge list.
target_edges = {}
to_visit = targets[:]
while to_visit:
src = to_visit.pop()
if src in target_edges:
continue
target_edges[src] = edges[src]
to_visit.extend(edges[src])
return target_edges
def WriteGraph(edges):
"""Print a graphviz graph to stdout.
|edges| is a map of target to a list of other targets it depends on."""
# Bucket targets by file.
files = collections.defaultdict(list)
for src, dst in edges.items():
build_file, target_name, toolset = ParseTarget(src)
files[build_file].append(src)
print 'digraph D {'
print ' fontsize=8' # Used by subgraphs.
print ' node [fontsize=8]'
# Output nodes by file. We must first write out each node within
# its file grouping before writing out any edges that may refer
# to those nodes.
for filename, targets in files.items():
if len(targets) == 1:
# If there's only one node for this file, simplify
# the display by making it a box without an internal node.
target = targets[0]
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
target_name)
else:
# Group multiple nodes together in a subgraph.
print ' subgraph "cluster_%s" {' % filename
print ' label = "%s"' % filename
for target in targets:
build_file, target_name, toolset = ParseTarget(target)
print ' "%s" [label="%s"]' % (target, target_name)
print ' }'
# Now that we've placed all the nodes within subgraphs, output all
# the edges between nodes.
for src, dsts in edges.items():
for dst in dsts:
print ' "%s" -> "%s"' % (src, dst)
print '}'
def main():
if len(sys.argv) < 2:
print >>sys.stderr, __doc__
print >>sys.stderr
print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
return 1
edges = LoadEdges('dump.json', sys.argv[1:])
WriteGraph(edges)
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 |
Elico-Corp/odoo_OCB | addons/anonymization/__openerp__.py | 18 | 1112 | # -*- encoding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Database Anonymization',
'version': '1.0',
'category': 'Extra Tools',
'description': """
This module allows you to anonymize a database.
===============================================
This module allows you to keep your data confidential for a given database.
This process is useful, if you want to use the migration process and protect
your own or your customer’s confidential data. The principle is that you run
an anonymization tool which will hide your confidential data (they are replaced
by ‘XXX’ characters). Then you can send the anonymized database to the migration
team. Once you get back your migrated database, you restore it and reverse the
anonymization process to recover your previous data.
""",
'depends': ['base'],
'demo': ['anonymization_demo.xml'],
'data': [
'ir.model.fields.anonymization.csv',
'security/ir.model.access.csv',
'anonymization_view.xml',
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
simbs/edx-platform | lms/djangoapps/student_account/views.py | 22 | 15892 | """ Views for a student's account information. """
import logging
import json
import urlparse
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.http import (
HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
)
from django.shortcuts import redirect
from django.http import HttpRequest
from django_countries import countries
from django.core.urlresolvers import reverse, resolve
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_http_methods
from lang_pref.api import released_languages
from edxmako.shortcuts import render_to_response
from microsite_configuration import microsite
from external_auth.login_and_register import (
login as external_auth_login,
register as external_auth_register
)
from student.models import UserProfile
from student.views import (
signin_user as old_login_view,
register_user as old_register_view
)
from student.helpers import get_next_url_for_login_page
import third_party_auth
from third_party_auth import pipeline
from third_party_auth.decorators import xframe_allow_whitelisted
from util.bad_request_rate_limiter import BadRequestRateLimiter
from openedx.core.djangoapps.user_api.accounts.api import request_password_change
from openedx.core.djangoapps.user_api.errors import UserNotFound
AUDIT_LOG = logging.getLogger("audit")
@require_http_methods(['GET'])
@ensure_csrf_cookie
@xframe_allow_whitelisted
def login_and_registration_form(request, initial_mode="login"):
"""Render the combined login/registration form, defaulting to login
This relies on the JS to asynchronously load the actual form from
the user_api.
Keyword Args:
initial_mode (string): Either "login" or "register".
"""
# Determine the URL to redirect to following login/registration/third_party_auth
redirect_to = get_next_url_for_login_page(request)
# If we're already logged in, redirect to the dashboard
if request.user.is_authenticated():
return redirect(redirect_to)
# Retrieve the form descriptions from the user API
form_descriptions = _get_form_descriptions(request)
# If this is a microsite, revert to the old login/registration pages.
# We need to do this for now to support existing themes.
# Microsites can use the new logistration page by setting
# 'ENABLE_COMBINED_LOGIN_REGISTRATION' in their microsites configuration file.
if microsite.is_request_in_microsite() and not microsite.get_value('ENABLE_COMBINED_LOGIN_REGISTRATION', False):
if initial_mode == "login":
return old_login_view(request)
elif initial_mode == "register":
return old_register_view(request)
# Allow external auth to intercept and handle the request
ext_auth_response = _external_auth_intercept(request, initial_mode)
if ext_auth_response is not None:
return ext_auth_response
# Our ?next= URL may itself contain a parameter 'tpa_hint=x' that we need to check.
# If present, we display a login page focused on third-party auth with that provider.
third_party_auth_hint = None
if '?' in redirect_to:
try:
next_args = urlparse.parse_qs(urlparse.urlparse(redirect_to).query)
provider_id = next_args['tpa_hint'][0]
if third_party_auth.provider.Registry.get(provider_id=provider_id):
third_party_auth_hint = provider_id
initial_mode = "hinted_login"
except (KeyError, ValueError, IndexError):
pass
# Otherwise, render the combined login/registration page
context = {
'data': {
'login_redirect_url': redirect_to,
'initial_mode': initial_mode,
'third_party_auth': _third_party_auth_context(request, redirect_to),
'third_party_auth_hint': third_party_auth_hint or '',
'platform_name': settings.PLATFORM_NAME,
# Include form descriptions retrieved from the user API.
# We could have the JS client make these requests directly,
# but we include them in the initial page load to avoid
# the additional round-trip to the server.
'login_form_desc': json.loads(form_descriptions['login']),
'registration_form_desc': json.loads(form_descriptions['registration']),
'password_reset_form_desc': json.loads(form_descriptions['password_reset']),
},
'login_redirect_url': redirect_to, # This gets added to the query string of the "Sign In" button in header
'responsive': True,
'allow_iframing': True,
'disable_courseware_js': True,
'disable_footer': True,
}
return render_to_response('student_account/login_and_register.html', context)
@require_http_methods(['POST'])
def password_change_request_handler(request):
"""Handle password change requests originating from the account page.
Uses the Account API to email the user a link to the password reset page.
Note:
The next step in the password reset process (confirmation) is currently handled
by student.views.password_reset_confirm_wrapper, a custom wrapper around Django's
password reset confirmation view.
Args:
request (HttpRequest)
Returns:
HttpResponse: 200 if the email was sent successfully
HttpResponse: 400 if there is no 'email' POST parameter, or if no user with
the provided email exists
HttpResponse: 403 if the client has been rate limited
HttpResponse: 405 if using an unsupported HTTP method
Example usage:
POST /account/password
"""
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Password reset rate limit exceeded")
return HttpResponseForbidden()
user = request.user
# Prefer logged-in user's email
email = user.email if user.is_authenticated() else request.POST.get('email')
if email:
try:
request_password_change(email, request.get_host(), request.is_secure())
except UserNotFound:
AUDIT_LOG.info("Invalid password reset attempt")
# Increment the rate limit counter
limiter.tick_bad_request_counter(request)
return HttpResponseBadRequest(_("No user with the provided email address exists."))
return HttpResponse(status=200)
else:
return HttpResponseBadRequest(_("No email address provided."))
def _third_party_auth_context(request, redirect_to):
"""Context for third party auth providers and the currently running pipeline.
Arguments:
request (HttpRequest): The request, used to determine if a pipeline
is currently running.
redirect_to: The URL to send the user to following successful
authentication.
Returns:
dict
"""
context = {
"currentProvider": None,
"providers": [],
"secondaryProviders": [],
"finishAuthUrl": None,
"errorMessage": None,
}
if third_party_auth.is_enabled():
for enabled in third_party_auth.provider.Registry.accepting_logins():
info = {
"id": enabled.provider_id,
"name": enabled.name,
"iconClass": enabled.icon_class,
"loginUrl": pipeline.get_login_url(
enabled.provider_id,
pipeline.AUTH_ENTRY_LOGIN,
redirect_url=redirect_to,
),
"registerUrl": pipeline.get_login_url(
enabled.provider_id,
pipeline.AUTH_ENTRY_REGISTER,
redirect_url=redirect_to,
),
}
context["providers" if not enabled.secondary else "secondaryProviders"].append(info)
running_pipeline = pipeline.get(request)
if running_pipeline is not None:
current_provider = third_party_auth.provider.Registry.get_from_pipeline(running_pipeline)
if current_provider is not None:
context["currentProvider"] = current_provider.name
context["finishAuthUrl"] = pipeline.get_complete_url(current_provider.backend_name)
if current_provider.skip_registration_form:
# As a reliable way of "skipping" the registration form, we just submit it automatically
context["autoSubmitRegForm"] = True
# Check for any error messages we may want to display:
for msg in messages.get_messages(request):
if msg.extra_tags.split()[0] == "social-auth":
# msg may or may not be translated. Try translating [again] in case we are able to:
context['errorMessage'] = _(unicode(msg)) # pylint: disable=translation-of-non-string
break
return context
def _get_form_descriptions(request):
"""Retrieve form descriptions from the user API.
Arguments:
request (HttpRequest): The original request, used to retrieve session info.
Returns:
dict: Keys are 'login', 'registration', and 'password_reset';
values are the JSON-serialized form descriptions.
"""
return {
'login': _local_server_get('/user_api/v1/account/login_session/', request.session),
'registration': _local_server_get('/user_api/v1/account/registration/', request.session),
'password_reset': _local_server_get('/user_api/v1/account/password_reset/', request.session)
}
def _local_server_get(url, session):
"""Simulate a server-server GET request for an in-process API.
Arguments:
url (str): The URL of the request (excluding the protocol and domain)
session (SessionStore): The session of the original request,
used to get past the CSRF checks.
Returns:
str: The content of the response
"""
# Since the user API is currently run in-process,
# we simulate the server-server API call by constructing
# our own request object. We don't need to include much
# information in the request except for the session
    # (to get past CSRF validation)
request = HttpRequest()
request.method = "GET"
request.session = session
# Call the Django view function, simulating
# the server-server API call
view, args, kwargs = resolve(url)
response = view(request, *args, **kwargs)
# Return the content of the response
return response.content
def _external_auth_intercept(request, mode):
"""Allow external auth to intercept a login/registration request.
Arguments:
request (Request): The original request.
mode (str): Either "login" or "register"
Returns:
Response or None
"""
if mode == "login":
return external_auth_login(request)
elif mode == "register":
return external_auth_register(request)
@login_required
@require_http_methods(['GET'])
def account_settings(request):
"""Render the current user's account settings page.
Args:
request (HttpRequest)
Returns:
HttpResponse: 200 if the page was sent successfully
HttpResponse: 302 if not logged in (redirect to login page)
HttpResponse: 405 if using an unsupported HTTP method
Example usage:
GET /account/settings
"""
return render_to_response('student_account/account_settings.html', account_settings_context(request))
@login_required
@require_http_methods(['GET'])
def finish_auth(request): # pylint: disable=unused-argument
""" Following logistration (1st or 3rd party), handle any special query string params.
See FinishAuthView.js for details on the query string params.
e.g. auto-enroll the user in a course, set email opt-in preference.
This view just displays a "Please wait" message while AJAX calls are made to enroll the
user in the course etc. This view is only used if a parameter like "course_id" is present
during login/registration/third_party_auth. Otherwise, there is no need for it.
Ideally this view will finish and redirect to the next step before the user even sees it.
Args:
request (HttpRequest)
Returns:
HttpResponse: 200 if the page was sent successfully
HttpResponse: 302 if not logged in (redirect to login page)
HttpResponse: 405 if using an unsupported HTTP method
Example usage:
GET /account/finish_auth/?course_id=course-v1:blah&enrollment_action=enroll
"""
return render_to_response('student_account/finish_auth.html', {
'disable_courseware_js': True,
'disable_footer': True,
})
def account_settings_context(request):
""" Context for the account settings page.
Args:
request: The request object.
Returns:
dict
"""
user = request.user
year_of_birth_options = [(unicode(year), unicode(year)) for year in UserProfile.VALID_YEARS]
context = {
'auth': {},
'duplicate_provider': None,
'fields': {
'country': {
'options': list(countries),
}, 'gender': {
'options': [(choice[0], _(choice[1])) for choice in UserProfile.GENDER_CHOICES], # pylint: disable=translation-of-non-string
}, 'language': {
'options': released_languages(),
}, 'level_of_education': {
'options': [(choice[0], _(choice[1])) for choice in UserProfile.LEVEL_OF_EDUCATION_CHOICES], # pylint: disable=translation-of-non-string
}, 'password': {
'url': reverse('password_reset'),
}, 'year_of_birth': {
'options': year_of_birth_options,
}, 'preferred_language': {
'options': settings.ALL_LANGUAGES,
}
},
'platform_name': settings.PLATFORM_NAME,
'user_accounts_api_url': reverse("accounts_api", kwargs={'username': user.username}),
'user_preferences_api_url': reverse('preferences_api', kwargs={'username': user.username}),
'disable_courseware_js': True,
}
if third_party_auth.is_enabled():
# If the account on the third party provider is already connected with another edX account,
# we display a message to the user.
context['duplicate_provider'] = pipeline.get_duplicate_provider(messages.get_messages(request))
auth_states = pipeline.get_provider_user_states(user)
context['auth']['providers'] = [{
'id': state.provider.provider_id,
'name': state.provider.name, # The name of the provider e.g. Facebook
'connected': state.has_account, # Whether the user's edX account is connected with the provider.
# If the user is not connected, they should be directed to this page to authenticate
# with the particular provider, as long as the provider supports initiating a login.
'connect_url': pipeline.get_login_url(
state.provider.provider_id,
pipeline.AUTH_ENTRY_ACCOUNT_SETTINGS,
# The url the user should be directed to after the auth process has completed.
redirect_url=reverse('account_settings'),
),
'accepts_logins': state.provider.accepts_logins,
# If the user is connected, sending a POST request to this url removes the connection
# information for this provider from their edX account.
'disconnect_url': pipeline.get_disconnect_url(state.provider.provider_id, state.association_id),
} for state in auth_states]
return context
| agpl-3.0 |
xuxiaoxin/micropython | tests/basics/int_mpz.py | 14 | 2093 | # to test arbitrariy precision integers
x = 1000000000000000000000000000000
y = 2000000000000000000000000000000
# printing
print(x)
print(y)
# addition
print(x + 1)
print(x + y)
# subtraction
print(x - 1)
print(x - y)
print(y - x)
# multiplication
print(x * 2)
print(x * y)
# integer division
print(x // 2)
print(y // x)
# bit inversion
print(~x)
print(~(-x))
# left shift
x = 0x10000000000000000000000
for i in range(32):
x = x << 1
print(x)
# right shift
x = 0x10000000000000000000000
for i in range(32):
x = x >> 1
print(x)
# left shift of a negative number
for i in range(8):
print(-10000000000000000000000000 << i)
print(-10000000000000000000000001 << i)
print(-10000000000000000000000002 << i)
print(-10000000000000000000000003 << i)
print(-10000000000000000000000004 << i)
# right shift of a negative number
for i in range(8):
print(-10000000000000000000000000 >> i)
print(-10000000000000000000000001 >> i)
print(-10000000000000000000000002 >> i)
print(-10000000000000000000000003 >> i)
print(-10000000000000000000000004 >> i)
# conversion from string
print(int("123456789012345678901234567890"))
print(int("-123456789012345678901234567890"))
print(int("123456789012345678901234567890abcdef", 16))
print(int("123456789012345678901234567890ABCDEF", 16))
# test constant integer with more than 255 chars
x = 0x84ce72aa8699df436059f052ac51b6398d2511e49631bcb7e71f89c499b9ee425dfbc13a5f6d408471b054f2655617cbbaf7937b7c80cd8865cf02c8487d30d2b0fbd8b2c4e102e16d828374bbc47b93852f212d5043c3ea720f086178ff798cc4f63f787b9c2e419efa033e7644ea7936f54462dc21a6c4580725f7f0e7d1aaaaaaa
print(x)
# test parsing ints just on threshold of small to big
# for 32 bit archs
x = 1073741823 # small
x = -1073741823 # small
x = 1073741824 # big
x = -1073741824 # big
# for 64 bit archs
x = 4611686018427387903 # small
x = -4611686018427387903 # small
x = 4611686018427387904 # big
x = -4611686018427387904 # big
# sys.maxsize is a constant mpz, so test that it's compatible with dynamic ones
import sys
print(sys.maxsize + 1 - 1 == sys.maxsize)
| mit |
jordij/wagtail | wagtail/wagtailadmin/views/account.py | 25 | 4016 | from django.conf import settings
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import SetPasswordForm
from django.contrib.auth.views import logout as auth_logout, login as auth_login
from django.contrib.auth import update_session_auth_hash
from django.utils.translation import ugettext as _
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.cache import never_cache
from wagtail.wagtailadmin import forms
from wagtail.wagtailusers.forms import NotificationPreferencesForm
from wagtail.wagtailusers.models import UserProfile
from wagtail.wagtailcore.models import UserPagePermissionsProxy
def account(request):
user_perms = UserPagePermissionsProxy(request.user)
show_notification_preferences = user_perms.can_edit_pages() or user_perms.can_publish_pages()
return render(request, 'wagtailadmin/account/account.html', {
'show_change_password': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True) and request.user.has_usable_password(),
'show_notification_preferences': show_notification_preferences
})
def change_password(request):
can_change_password = request.user.has_usable_password()
if can_change_password:
if request.POST:
form = SetPasswordForm(request.user, request.POST)
if form.is_valid():
form.save()
update_session_auth_hash(request, form.user)
messages.success(request, _("Your password has been changed successfully!"))
return redirect('wagtailadmin_account')
else:
form = SetPasswordForm(request.user)
else:
form = None
return render(request, 'wagtailadmin/account/change_password.html', {
'form': form,
'can_change_password': can_change_password,
})
def notification_preferences(request):
if request.POST:
form = NotificationPreferencesForm(request.POST, instance=UserProfile.get_for_user(request.user))
if form.is_valid():
form.save()
messages.success(request, _("Your preferences have been updated successfully!"))
return redirect('wagtailadmin_account')
else:
form = NotificationPreferencesForm(instance=UserProfile.get_for_user(request.user))
# quick-and-dirty catch-all in case the form has been rendered with no
# fields, as the user has no customisable permissions
if not form.fields:
return redirect('wagtailadmin_account')
return render(request, 'wagtailadmin/account/notification_preferences.html', {
'form': form,
})
@sensitive_post_parameters()
@never_cache
def login(request):
if request.user.is_authenticated() and request.user.has_perm('wagtailadmin.access_admin'):
return redirect('wagtailadmin_home')
else:
from django.contrib.auth import get_user_model
return auth_login(request,
template_name='wagtailadmin/login.html',
authentication_form=forms.LoginForm,
extra_context={
'show_password_reset': getattr(settings, 'WAGTAIL_PASSWORD_MANAGEMENT_ENABLED', True),
'username_field': get_user_model().USERNAME_FIELD,
},
)
def logout(request):
response = auth_logout(request, next_page='wagtailadmin_login')
# By default, logging out will generate a fresh sessionid cookie. We want to use the
# absence of sessionid as an indication that front-end pages are being viewed by a
# non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.
response.delete_cookie(settings.SESSION_COOKIE_NAME,
domain=settings.SESSION_COOKIE_DOMAIN,
path=settings.SESSION_COOKIE_PATH)
# HACK: pretend that the session hasn't been modified, so that SessionMiddleware
# won't override the above and write a new cookie.
request.session.modified = False
return response
| bsd-3-clause |
Sweetgrassbuffalo/ReactionSweeGrass-v2 | .meteor/local/dev_bundle/python/Lib/ctypes/test/test_frombuffer.py | 26 | 2461 | from ctypes import *
import array
import gc
import unittest
class X(Structure):
_fields_ = [("c_int", c_int)]
init_called = False
def __init__(self):
        self.init_called = True
class Test(unittest.TestCase):
    def test_from_buffer(self):
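        # from_buffer shares memory with the source object and must not call __init__.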
a = array.array("i", range(16))
x = (c_int * 16).from_buffer(a)
y = X.from_buffer(a)
self.assertEqual(y.c_int, a[0])
self.assertFalse(y.init_called)
self.assertEqual(x[:], a.tolist())
a[0], a[-1] = 200, -200
self.assertEqual(x[:], a.tolist())
self.assertIn(a, x._objects.values())
self.assertRaises(ValueError,
c_int.from_buffer, a, -1)
expected = x[:]
del a; gc.collect(); gc.collect(); gc.collect()
self.assertEqual(x[:], expected)
self.assertRaises(TypeError,
(c_char * 16).from_buffer, "a" * 16)
    def test_from_buffer_with_offset(self):
a = array.array("i", range(16))
x = (c_int * 15).from_buffer(a, sizeof(c_int))
self.assertEqual(x[:], a.tolist()[1:])
self.assertRaises(ValueError, lambda: (c_int * 16).from_buffer(a, sizeof(c_int)))
self.assertRaises(ValueError, lambda: (c_int * 1).from_buffer(a, 16 * sizeof(c_int)))
def test_from_buffer_copy(self):
a = array.array("i", range(16))
x = (c_int * 16).from_buffer_copy(a)
y = X.from_buffer_copy(a)
self.assertEqual(y.c_int, a[0])
self.assertFalse(y.init_called)
self.assertEqual(x[:], range(16))
a[0], a[-1] = 200, -200
self.assertEqual(x[:], range(16))
self.assertEqual(x._objects, None)
self.assertRaises(ValueError,
c_int.from_buffer, a, -1)
del a; gc.collect(); gc.collect(); gc.collect()
self.assertEqual(x[:], range(16))
x = (c_char * 16).from_buffer_copy("a" * 16)
self.assertEqual(x[:], "a" * 16)
    def test_from_buffer_copy_with_offset(self):
a = array.array("i", range(16))
x = (c_int * 15).from_buffer_copy(a, sizeof(c_int))
self.assertEqual(x[:], a.tolist()[1:])
self.assertRaises(ValueError,
(c_int * 16).from_buffer_copy, a, sizeof(c_int))
self.assertRaises(ValueError,
(c_int * 1).from_buffer_copy, a, 16 * sizeof(c_int))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
raphui/linux | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for a x86 HW PMU event: PEBS with load latency data.
#
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data has a big number of samples, then the insert operation
# will be very time consuming (about 10+ minutes for 10000 samples) if the
# .db database is on disk. Move the .db file to RAM based FS to speedup
# the handling, which will cut the time down to several seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
def trace_begin():
print "In trace_begin:\n"
#
# Will create several tables at the start, pebs_ll is for PEBS data with
# load latency info, while gen_events is for general event.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create and insert event object to a database so that user could
# do more analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
# Create the event object and insert it to the right table in database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
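    # Route the event into the table that matches its type.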
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the number of events may be very large, we can't display the
# histogram on a linear scale, so a log2 scale is used instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
print "There is %d records in gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
print "There is %d records in pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/Mako-0.7.2-py2.7.egg/mako/ext/autohandler.py | 57 | 1841 | # ext/autohandler.py
# Copyright (C) 2006-2012 the Mako authors and contributors <see AUTHORS file>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""adds autohandler functionality to Mako templates.
requires that the TemplateLookup class is used with templates.
usage:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context)}"/>
or with custom autohandler filename:
<%!
from mako.ext.autohandler import autohandler
%>
<%inherit file="${autohandler(template, context, name='somefilename')}"/>
"""
import posixpath, os, re
def autohandler(template, context, name='autohandler'):
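    """Return the URI of the nearest enclosing autohandler template, or None if none exists."""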
lookup = context.lookup
_template_uri = template.module._template_uri
if not lookup.filesystem_checks:
try:
return lookup._uri_cache[(autohandler, _template_uri, name)]
except KeyError:
pass
tokens = re.findall(r'([^/]+)', posixpath.dirname(_template_uri)) + [name]
while len(tokens):
path = '/' + '/'.join(tokens)
if path != _template_uri and _file_exists(lookup, path):
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), path)
else:
return path
if len(tokens) == 1:
break
tokens[-2:] = [name]
if not lookup.filesystem_checks:
return lookup._uri_cache.setdefault(
(autohandler, _template_uri, name), None)
else:
return None
def _file_exists(lookup, path):
    psub = re.sub(r'^/', '', path)
for d in lookup.directories:
if os.path.exists(d + '/' + psub):
return True
else:
return False
| apache-2.0 |
dhodhala88/Bosch1 | weblate/trans/views/git.py | 2 | 5180 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.utils.translation import ugettext as _
from django.contrib import messages
from django.contrib.auth.decorators import login_required, permission_required
from weblate.trans.views.helper import (
get_project, get_subproject, get_translation
)
from weblate.trans.filelock import FileLockException
from weblate.trans.util import redirect_param
def execute_locked(request, obj, message, call, *args, **kwargs):
"""
Helper function to catch possible lock exception.
"""
try:
result = call(request, *args, **kwargs)
        # With False the call is supposed to show errors on its own
if result is None or result:
messages.success(request, message)
except FileLockException:
messages.error(
request,
_('Failed to lock the repository, another operation in progress.')
)
return redirect_param(obj, '#repository')
def perform_commit(request, obj):
"""
    Helper function to do the repository commit.
"""
return execute_locked(
request,
obj,
_('All pending translations were committed.'),
obj.commit_pending,
)
def perform_update(request, obj):
"""
Helper function to do the repository update.
"""
return execute_locked(
request,
obj,
_('All repositories were updated.'),
obj.do_update,
method=request.GET.get('method', None)
)
def perform_push(request, obj):
"""
Helper function to do the repository push.
"""
return execute_locked(
request,
obj,
_('All repositories were pushed.'),
obj.do_push
)
def perform_reset(request, obj):
"""
Helper function to do the repository reset.
"""
return execute_locked(
request,
obj,
_('All repositories have been reset.'),
obj.do_reset
)
@login_required
@permission_required('trans.commit_translation')
def commit_project(request, project):
obj = get_project(request, project)
return perform_commit(request, obj)
@login_required
@permission_required('trans.commit_translation')
def commit_subproject(request, project, subproject):
obj = get_subproject(request, project, subproject)
return perform_commit(request, obj)
@login_required
@permission_required('trans.commit_translation')
def commit_translation(request, project, subproject, lang):
obj = get_translation(request, project, subproject, lang)
return perform_commit(request, obj)
@login_required
@permission_required('trans.update_translation')
def update_project(request, project):
obj = get_project(request, project)
return perform_update(request, obj)
@login_required
@permission_required('trans.update_translation')
def update_subproject(request, project, subproject):
obj = get_subproject(request, project, subproject)
return perform_update(request, obj)
@login_required
@permission_required('trans.update_translation')
def update_translation(request, project, subproject, lang):
obj = get_translation(request, project, subproject, lang)
return perform_update(request, obj)
@login_required
@permission_required('trans.push_translation')
def push_project(request, project):
obj = get_project(request, project)
return perform_push(request, obj)
@login_required
@permission_required('trans.push_translation')
def push_subproject(request, project, subproject):
obj = get_subproject(request, project, subproject)
return perform_push(request, obj)
@login_required
@permission_required('trans.push_translation')
def push_translation(request, project, subproject, lang):
obj = get_translation(request, project, subproject, lang)
return perform_push(request, obj)
@login_required
@permission_required('trans.reset_translation')
def reset_project(request, project):
obj = get_project(request, project)
return perform_reset(request, obj)
@login_required
@permission_required('trans.reset_translation')
def reset_subproject(request, project, subproject):
obj = get_subproject(request, project, subproject)
return perform_reset(request, obj)
@login_required
@permission_required('trans.reset_translation')
def reset_translation(request, project, subproject, lang):
obj = get_translation(request, project, subproject, lang)
return perform_reset(request, obj)
| gpl-3.0 |
jayflo/scikit-learn | sklearn/feature_selection/tests/test_rfe.py | 209 | 11733 | """
Testing Recursive feature elimination
"""
import warnings
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_array_equal
from nose.tools import assert_equal, assert_true
from scipy import sparse
from sklearn.feature_selection.rfe import RFE, RFECV
from sklearn.datasets import load_iris, make_friedman1
from sklearn.metrics import zero_one_loss
from sklearn.svm import SVC, SVR
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import cross_val_score
from sklearn.utils import check_random_state
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_greater
from sklearn.metrics import make_scorer
from sklearn.metrics import get_scorer
class MockClassifier(object):
"""
    Dummy classifier to test recursive feature elimination
"""
def __init__(self, foo_param=0):
self.foo_param = foo_param
def fit(self, X, Y):
assert_true(len(X) == len(Y))
self.coef_ = np.ones(X.shape[1], dtype=np.float64)
return self
def predict(self, T):
return T.shape[0]
predict_proba = predict
decision_function = predict
transform = predict
def score(self, X=None, Y=None):
if self.foo_param > 1:
score = 1.
else:
score = 0.
return score
def get_params(self, deep=True):
return {'foo_param': self.foo_param}
def set_params(self, **params):
return self
def test_rfe_set_params():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
y_pred = rfe.fit(X, y).predict(X)
clf = SVC()
with warnings.catch_warnings(record=True):
# estimator_params is deprecated
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'})
y_pred2 = rfe.fit(X, y).predict(X)
assert_array_equal(y_pred, y_pred2)
def test_rfe_features_importance():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
clf = RandomForestClassifier(n_estimators=20,
random_state=generator, max_depth=2)
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
assert_equal(len(rfe.ranking_), X.shape[1])
clf_svc = SVC(kernel="linear")
rfe_svc = RFE(estimator=clf_svc, n_features_to_select=4, step=0.1)
rfe_svc.fit(X, y)
# Check if the supports are equal
assert_array_equal(rfe.get_support(), rfe_svc.get_support())
def test_rfe_deprecation_estimator_params():
deprecation_message = ("The parameter 'estimator_params' is deprecated as "
"of version 0.16 and will be removed in 0.18. The "
"parameter is no longer necessary because the "
"value is set via the estimator initialisation or "
"set_params method.")
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
assert_warns_message(DeprecationWarning, deprecation_message,
RFE(estimator=SVC(), n_features_to_select=4, step=0.1,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
assert_warns_message(DeprecationWarning, deprecation_message,
RFECV(estimator=SVC(), step=1, cv=5,
estimator_params={'kernel': 'linear'}).fit,
X=X,
y=y)
def test_rfe():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
X_sparse = sparse.csr_matrix(X)
y = iris.target
# dense model
clf = SVC(kernel="linear")
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
# sparse model
clf_sparse = SVC(kernel="linear")
rfe_sparse = RFE(estimator=clf_sparse, n_features_to_select=4, step=0.1)
rfe_sparse.fit(X_sparse, y)
X_r_sparse = rfe_sparse.transform(X_sparse)
assert_equal(X_r.shape, iris.data.shape)
assert_array_almost_equal(X_r[:10], iris.data[:10])
assert_array_almost_equal(rfe.predict(X), clf.predict(iris.data))
assert_equal(rfe.score(X, y), clf.score(iris.data, iris.target))
assert_array_almost_equal(X_r, X_r_sparse.toarray())
def test_rfe_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = iris.target
# dense model
clf = MockClassifier()
rfe = RFE(estimator=clf, n_features_to_select=4, step=0.1)
rfe.fit(X, y)
X_r = rfe.transform(X)
clf.fit(X_r, y)
assert_equal(len(rfe.ranking_), X.shape[1])
assert_equal(X_r.shape, iris.data.shape)
def test_rfecv():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
# All the noisy variable were filtered out
assert_array_equal(X_r, iris.data)
# same in sparse
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
# Test using a customized loss function
scoring = make_scorer(zero_one_loss, greater_is_better=False)
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scoring)
ignore_warnings(rfecv.fit)(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test using a scorer
scorer = get_scorer('accuracy')
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=scorer)
rfecv.fit(X, y)
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
# Test fix on grid_scores
def test_scorer(estimator, X, y):
return 1.0
rfecv = RFECV(estimator=SVC(kernel="linear"), step=1, cv=5,
scoring=test_scorer)
rfecv.fit(X, y)
assert_array_equal(rfecv.grid_scores_, np.ones(len(rfecv.grid_scores_)))
# Same as the first two tests, but with step=2
rfecv = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
rfecv.fit(X, y)
assert_equal(len(rfecv.grid_scores_), 6)
assert_equal(len(rfecv.ranking_), X.shape[1])
X_r = rfecv.transform(X)
assert_array_equal(X_r, iris.data)
rfecv_sparse = RFECV(estimator=SVC(kernel="linear"), step=2, cv=5)
X_sparse = sparse.csr_matrix(X)
rfecv_sparse.fit(X_sparse, y)
X_r_sparse = rfecv_sparse.transform(X_sparse)
assert_array_equal(X_r_sparse.toarray(), iris.data)
def test_rfecv_mockclassifier():
generator = check_random_state(0)
iris = load_iris()
X = np.c_[iris.data, generator.normal(size=(len(iris.data), 6))]
y = list(iris.target) # regression test: list should be supported
# Test using the score function
rfecv = RFECV(estimator=MockClassifier(), step=1, cv=5)
rfecv.fit(X, y)
# non-regression test for missing worst feature:
assert_equal(len(rfecv.grid_scores_), X.shape[1])
assert_equal(len(rfecv.ranking_), X.shape[1])
def test_rfe_estimator_tags():
rfe = RFE(SVC(kernel='linear'))
assert_equal(rfe._estimator_type, "classifier")
# make sure that cross-validation is stratified
iris = load_iris()
score = cross_val_score(rfe, iris.data, iris.target)
assert_greater(score.min(), .7)
def test_rfe_min_step():
n_features = 10
X, y = make_friedman1(n_samples=50, n_features=n_features, random_state=0)
n_samples, n_features = X.shape
estimator = SVR(kernel="linear")
# Test when floor(step * n_features) <= 0
selector = RFE(estimator, step=0.01)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is between (0,1) and floor(step * n_features) > 0
selector = RFE(estimator, step=0.20)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
# Test when step is an integer
selector = RFE(estimator, step=5)
sel = selector.fit(X, y)
assert_equal(sel.support_.sum(), n_features // 2)
def test_number_of_subsets_of_features():
# In RFE, 'number_of_subsets_of_features'
# = the number of iterations in '_fit'
# = max(ranking_)
# = 1 + (n_features + step - n_features_to_select - 1) // step
# After optimization #4534, this number
# = 1 + np.ceil((n_features - n_features_to_select) / float(step))
# This test case is to test their equivalence, refer to #4534 and #3824
def formula1(n_features, n_features_to_select, step):
return 1 + ((n_features + step - n_features_to_select - 1) // step)
def formula2(n_features, n_features_to_select, step):
return 1 + np.ceil((n_features - n_features_to_select) / float(step))
# RFE
# Case 1, n_features - n_features_to_select is divisible by step
# Case 2, n_features - n_features_to_select is not divisible by step
n_features_list = [11, 11]
n_features_to_select_list = [3, 3]
step_list = [2, 3]
for n_features, n_features_to_select, step in zip(
n_features_list, n_features_to_select_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfe = RFE(estimator=SVC(kernel="linear"),
n_features_to_select=n_features_to_select, step=step)
rfe.fit(X, y)
# this number also equals to the maximum of ranking_
assert_equal(np.max(rfe.ranking_),
formula1(n_features, n_features_to_select, step))
assert_equal(np.max(rfe.ranking_),
formula2(n_features, n_features_to_select, step))
# In RFECV, 'fit' calls 'RFE._fit'
# 'number_of_subsets_of_features' of RFE
# = the size of 'grid_scores' of RFECV
# = the number of iterations of the for loop before optimization #4534
# RFECV, n_features_to_select = 1
# Case 1, n_features - 1 is divisible by step
# Case 2, n_features - 1 is not divisible by step
n_features_to_select = 1
n_features_list = [11, 10]
step_list = [2, 2]
for n_features, step in zip(n_features_list, step_list):
generator = check_random_state(43)
X = generator.normal(size=(100, n_features))
y = generator.rand(100).round()
rfecv = RFECV(estimator=SVC(kernel="linear"), step=step, cv=5)
rfecv.fit(X, y)
assert_equal(rfecv.grid_scores_.shape[0],
formula1(n_features, n_features_to_select, step))
assert_equal(rfecv.grid_scores_.shape[0],
formula2(n_features, n_features_to_select, step))
| bsd-3-clause |
sanguinariojoe/aquagpusph | examples/2D/spheric_testcase6_movingsquare/cMake/Motion.py | 1 | 2887 | #########################################################################
# #
# # ## # # # # #
# # # # # # # # # # #
# ##### # # # # ##### ## ### # # ## ### ### #
# # # # # # # # # # # # # # # # # # # # #
# # # # # # # # # # # # # # # # # # # # #
# # # ## # ## # # ### ### ### ## ### # # #
# # # # #
# ## # # #
# #
#########################################################################
#
# This file is part of AQUA-gpusph, a free CFD program based on SPH.
# Copyright (C) 2012 Jose Luis Cercos Pita <jl.cercos@upm.es>
#
# AQUA-gpusph is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# AQUA-gpusph is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with AQUA-gpusph. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import numpy as np
import os.path as path
import aquagpusph as aqua
# Read the experimental data
f = open(path.join('@EXAMPLE_DEST_DIR@/doc', 'Motion_Body.dat'), 'r')
lines = f.readlines()
f.close()
T = []
X = []
U = []
DUDT = []
for l in lines[2:]:
l = l.strip()
while l.find(' ') != -1:
l = l.replace(' ', ' ')
if l == '':
continue
t, dudt, u, x = map(float, l.split(' '))
T.append(t)
X.append(x)
U.append(u)
DUDT.append(dudt)
del f, lines
T = np.asarray(T)
X = np.asarray(X)
U = np.asarray(U)
DUDT = np.asarray(DUDT)
F = open('Motion.dat', 'w')
def main():
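    """Interpolate the prescribed body motion at the current time and send it to AQUAgpusph."""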
# Get the time instant
t = aqua.get("t")
# Interpolate the data
r = np.zeros(2, dtype=np.float32)
r[0] = np.interp(t, T, X)
u = np.zeros(2, dtype=np.float32)
u[0] = np.interp(t, T, U)
dudt = np.zeros(2, dtype=np.float32)
dudt[0] = np.interp(t, T, DUDT)
# Send it to AQUAgpusph
aqua.set("motion_r", r)
aqua.set("motion_drdt", u)
aqua.set("motion_ddrddt", dudt)
# Write output
F.write('{}\t{}\t{}\t{}\n'.format(t, r[0], u[0], dudt[0]))
F.flush()
return True | gpl-3.0 |
DrimTim32/py_proj_lights | develop_scripts/code_check/code_check.py | 1 | 3156 | import os
import re
import sys
total = 0.0
count = 0
BASE_DIRECTORY = os.getcwd()
EXTENDED = ""
TYPE = "text"
excluded_directories = ["develop_scripts", "tests", "docs", "demo"]
excluded_files = ["__init__.py", "setup.py", "custom_assertions.py", "conftest.py"]
expected_html_tags = ["<table>", "<html>"]
expected_parsable_lines = ["<h2>Global evaluation</h2>"]
def parse_html(line):
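    # Rewrite pylint's raw HTML wrapper tags into the styled markup used by the report.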
if "<html>" in line:
return ""
if "<table>" in line:
return '<table class="table" style="width:auto;">'
def parse_line(line):
if "Global evaluation" in line:
return "<h2>File evaluation</h2>"
def check(module):
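    """Run pylint on a single module, print its report and accumulate the global score."""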
global total, count, BASE_DIRECTORY
if module[-3:] == ".py":
pout = os.popen('pylint {} --output-format={}'.format(module, TYPE), 'r')
module = module.replace("../", "")
if TYPE == "html":
print('<button data-toggle="collapse" data-target="#{1}" class="btn btn-default" style="width: 500px;">'
'{0}</button> <div id="{1}" class="collapse">'
.format(module, module.replace(".", "-").replace("\\", "-")))
else:
"Checking : {0}".format(module)
for line in pout:
if line.strip() in expected_html_tags:
print(parse_html(line))
elif EXTENDED == "f":
print(line)
elif EXTENDED == "e" and line[0:2] in ["C:", "W:", "E:"]:
print(line)
elif "Your code has been rated at" in line:
print(line)
if "Your code has been rated at" in line:
                score = re.findall(r"[-]?(\d+\.\d+)", line)[0]
total += float(score)
count += 1
print("-" * 50 + "\n")
if TYPE == "html":
print("</div></br>")
def print_header():
print("<html><head>")
print('<script src="http://code.jquery.com/jquery-latest.min.js" type="text/javascript"></script>'
'<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/css/bootstrap.min.css" '
'integrity="sha384-BVYiiSIFeK1dGmJRAkycuHAHRg32OmUcww7on3RYdg4Va+PmSTsz/K68vbdEjh4u" '
'crossorigin="anonymous">'
'<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.7/js/bootstrap.min.js"'
' integrity="sha384-Tc5IQib027qvyjSMfHjOMaLkfuWVxZxUPnCJA7l2mCWNIpG9mGCD8wGNIcPD7Txa" '
'crossorigin="anonymous"></script>')
print("</head>")
if __name__ == "__main__":
BASE_DIRECTORY = sys.argv[1]
EXTENDED = sys.argv[2]
TYPE = sys.argv[3]
if len(sys.argv) > 4:
sys.stdout = open(sys.argv[4], 'w+')
if TYPE == "html":
print_header()
for root, dirs, files in os.walk(BASE_DIRECTORY):
for ignore in excluded_directories:
if ignore in dirs:
dirs.remove(ignore)
for name in files:
if name in excluded_files:
continue
check(os.path.join(root, name))
brk = "</br>" if TYPE == "html" else "\n"
print(brk + "%d modules found" % count)
print(brk + "AVERAGE SCORE = %.02f" % (total / count))
| mit |
dhruvsrivastava/OJ | python/lib/python2.7/site-packages/pip/utils/logging.py | 199 | 3326 | from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
from pip.utils import ensure_dir
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
_log_state.indentation = 0
@contextlib.contextmanager
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
    log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
def get_indentation():
return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
def format(self, record):
"""
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
"""
formatted = logging.Formatter.format(self, record)
formatted = "".join([
(" " * get_indentation()) + line
for line in formatted.splitlines(True)
])
return formatted
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
# Don't build up a list of colors if we don't have colorama
if colorama:
COLORS = [
# This needs to be in order from highest logging level to lowest.
(logging.ERROR, _color_wrap(colorama.Fore.RED)),
(logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
]
else:
COLORS = []
def __init__(self, stream=None):
logging.StreamHandler.__init__(self, stream)
if WINDOWS and colorama:
self.stream = colorama.AnsiToWin32(self.stream)
def should_color(self):
# Don't colorize things if we do not have colorama
if not colorama:
return False
real_stream = (
self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
else self.stream.wrapped
)
# If the stream is a tty we should color it
if hasattr(real_stream, "isatty") and real_stream.isatty():
return True
        # If we have an ANSI term we should color it
if os.environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def format(self, record):
msg = logging.StreamHandler.format(self, record)
if self.should_color():
for level, color in self.COLORS:
if record.levelno >= level:
msg = color(msg)
break
return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
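    """RotatingFileHandler that creates the log directory if it does not yet exist."""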
def _open(self):
ensure_dir(os.path.dirname(self.baseFilename))
return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
| bsd-3-clause |
venmo/ansible | lib/ansible/plugins/lookup/fileglob.py | 176 | 1345 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import glob
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
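        # Glob each term relative to the play's 'files' directory and return only regular files.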
basedir = self.get_basedir(variables)
ret = []
for term in terms:
term_file = os.path.basename(term)
dwimmed_path = self._loader.path_dwim_relative(basedir, 'files', os.path.dirname(term))
globbed = glob.glob(os.path.join(dwimmed_path, term_file))
ret.extend(g for g in globbed if os.path.isfile(g))
return ret
| gpl-3.0 |
kobolabs/calibre | src/chardet/big5freq.py | 323 | 82588 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# The typical distribution ratio is about 25% of the ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
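# Illustrative arithmetic behind the constant above (these two names are not
# in the original file): the 512 most frequent of the 5401 ranked characters
# cover ~74.851% of typical text, so
_IDEAL_RATIO = 0.74851 / (1 - 0.74851)   # ~2.98: frequent-char mass vs. the rest
_RANDOM_RATIO = 512.0 / (5401 - 512)     # ~0.105: expected for random byte soup
# 0.75 is roughly 25% of the ideal ratio yet still far above the random one,
# which is what makes it usable as a detection threshold.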
# Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
| gpl-3.0 |
Tiryoh/mbed | workspace_tools/export/ds5_5.py | 22 | 1997 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from exporters import Exporter
from os.path import basename
class DS5_5(Exporter):
NAME = 'DS5'
TARGETS = [
'LPC1768',
'LPC11U24',
'LPC812',
'UBLOX_C027',
'ARCH_PRO',
'RZ_A1H',
]
USING_MICROLIB = [
'LPC812',
]
FILE_TYPES = {
'c_sources':'1',
'cpp_sources':'8',
's_sources':'2'
}
def get_toolchain(self):
return 'uARM' if (self.target in self.USING_MICROLIB) else 'ARM'
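# Targets listed in USING_MICROLIB (e.g. 'LPC812') are exported against the
# MicroLib 'uARM' toolchain; every other supported target uses plain 'ARM'.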
def generate(self):
source_files = []
for r_type, n in DS5_5.FILE_TYPES.iteritems():
for file in getattr(self.resources, r_type):
source_files.append({
'name': basename(file), 'type': n, 'path': file
})
ctx = {
'name': self.program_name,
'include_paths': self.resources.inc_dirs,
'scatter_file': self.resources.linker_script,
'object_files': self.resources.objects + self.resources.libraries,
'source_files': source_files,
'symbols': self.get_symbols()
}
target = self.target.lower()
# Project file
self.gen_file('ds5_5_%s.project.tmpl' % target, ctx, '.project')
self.gen_file('ds5_5_%s.cproject.tmpl' % target, ctx, '.cproject')
self.gen_file('ds5_5_%s.launch.tmpl' % target, ctx, 'ds5_%s.launch' % target)
| apache-2.0 |
androidarmv6/android_external_chromium_org | tools/crx_id/crx_id_unittest.py | 120 | 3482 | #!/usr/bin/env python
#
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Verify that crx_id.py generates a reasonable string for a canned CRX file.
"""
import crx_id
import os
import shutil
import sys
import unittest
import tempfile
class CrxIdUnittest(unittest.TestCase):
CRX_ID_DIR = os.path.abspath(os.path.dirname(sys.argv[0]))
PACKED_CRX = os.path.join(CRX_ID_DIR,
'jebgalgnebhfojomionfpkfelancnnkf.crx')
PACKED_APP_ID = 'jebgalgnebhfojomionfpkfelancnnkf'
PACKED_HASH_BYTES = \
'{0x94, 0x16, 0x0b, 0x6d, 0x41, 0x75, 0xe9, 0xec,' \
' 0x8e, 0xd5, 0xfa, 0x54, 0xb0, 0xd2, 0xdd, 0xa5,' \
' 0x6e, 0x05, 0x6b, 0xe8, 0x73, 0x47, 0xf6, 0xc4,' \
' 0x11, 0x9f, 0xbc, 0xb3, 0x09, 0xb3, 0x5b, 0x40}'
def testPackedHashAppId(self):
""" Test the output generated for a canned, packed CRX. """
self.assertEqual(crx_id.GetCRXAppID(self.PACKED_CRX),
self.PACKED_APP_ID)
self.assertEqual(crx_id.GetCRXHash(self.PACKED_CRX),
self.PACKED_HASH_BYTES)
UNPACKED_APP_ID = 'cbcdidchbppangcjoddlpdjlenngjldk'
UNPACKED_HASH_BYTES = \
'{0x21, 0x23, 0x83, 0x27, 0x1f, 0xf0, 0xd6, 0x29,' \
' 0xe3, 0x3b, 0xf3, 0x9b, 0x4d, 0xd6, 0x9b, 0x3a,' \
' 0xff, 0x7d, 0x6b, 0xc4, 0x78, 0x30, 0x47, 0xa6,' \
' 0x23, 0x12, 0x72, 0x84, 0x9b, 0x9a, 0xf6, 0x3c}'
def testUnpackedHashAppId(self):
""" Test the output generated for a canned, unpacked extension. """
# Copy ../../chrome/test/data/extensions/unpacked/manifest_with_key.json
# to a temporary location.
unpacked_test_manifest_path = os.path.join(
self.CRX_ID_DIR,
'..', '..', 'chrome', 'test', 'data', 'extensions', 'unpacked',
'manifest_with_key.json')
temp_unpacked_crx = tempfile.mkdtemp()
shutil.copy2(unpacked_test_manifest_path,
os.path.join(temp_unpacked_crx, 'manifest.json'))
self.assertEqual(crx_id.GetCRXAppID(temp_unpacked_crx),
self.UNPACKED_APP_ID)
self.assertEqual(crx_id.GetCRXHash(temp_unpacked_crx),
self.UNPACKED_HASH_BYTES)
self.assertTrue(crx_id.HasPublicKey(temp_unpacked_crx))
shutil.rmtree(temp_unpacked_crx)
def testFromFilePath(self):
""" Test calculation of extension id from file paths. """
self.assertEqual(crx_id.GetCRXAppID('/tmp/temp_extension',
from_file_path=True),
'ajbbicncdkdlchpjplgjaglppbcbmaji')
def testFromWindowsPath(self):
self.assertEqual(crx_id.GetCRXAppID(r'D:\Documents\chrome\test_extension',
from_file_path=True,
is_win_path=True),
'fegemedmbnhglnecjgbdhekaghkccplm')
# Test drive letter normalization.
kWinPathId = 'aiinlcdagjihibappcdnnhcccdokjlaf'
self.assertEqual(crx_id.GetCRXAppID(r'c:\temp_extension',
from_file_path=True,
is_win_path=True),
kWinPathId)
self.assertEqual(crx_id.GetCRXAppID(r'C:\temp_extension',
from_file_path=True,
is_win_path=True),
kWinPathId)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
atlassian/dd-agent | utils/dockerutil.py | 7 | 6857 | # stdlib
import logging
import os
# 3rd party
from docker import Client
from docker import tls
class MountException(Exception):
pass
# Default docker client settings
DEFAULT_TIMEOUT = 5
DEFAULT_VERSION = 'auto'
CHECK_NAME = 'docker_daemon'
log = logging.getLogger(__name__)
_docker_client_settings = {"version": DEFAULT_VERSION}
def is_dockerized():
return os.environ.get("DOCKER_DD_AGENT") == "yes"
def get_docker_settings():
global _docker_client_settings
return _docker_client_settings
def reset_docker_settings():
global _docker_client_settings
_docker_client_settings = {"version": DEFAULT_VERSION}
def set_docker_settings(init_config, instance):
global _docker_client_settings
_docker_client_settings = {
"version": init_config.get('api_version', DEFAULT_VERSION),
"base_url": instance.get("url"),
"timeout": int(init_config.get('timeout', DEFAULT_TIMEOUT)),
}
if init_config.get('tls', False):
client_cert_path = init_config.get('tls_client_cert')
client_key_path = init_config.get('tls_client_key')
cacert = init_config.get('tls_cacert')
verify = init_config.get('tls_verify')
client_cert = None
if client_cert_path is not None and client_key_path is not None:
client_cert = (client_cert_path, client_key_path)
verify = verify if verify is not None else cacert
tls_config = tls.TLSConfig(client_cert=client_cert, verify=verify)
_docker_client_settings["tls"] = tls_config
def get_client():
return Client(**_docker_client_settings)
def find_cgroup(hierarchy, docker_root):
"""Find the mount point for a specified cgroup hierarchy.
Works with old style and new style mounts.
"""
# NB: os.path.join discards docker_root when its second argument is absolute,
# so join a relative path here to honor a host-mounted proc filesystem.
with open(os.path.join(docker_root, "proc/mounts"), 'r') as fp:
mounts = map(lambda x: x.split(), fp.read().splitlines())
cgroup_mounts = filter(lambda x: x[2] == "cgroup", mounts)
if len(cgroup_mounts) == 0:
raise Exception(
"Can't find mounted cgroups. If you run the Agent inside a container,"
" please refer to the documentation.")
# Old cgroup style
if len(cgroup_mounts) == 1:
return os.path.join(docker_root, cgroup_mounts[0][1])
candidate = None
for _, mountpoint, _, opts, _, _ in cgroup_mounts:
if hierarchy in opts:
if mountpoint.startswith("/host/"):
return os.path.join(docker_root, mountpoint)
candidate = mountpoint
if candidate is not None:
return os.path.join(docker_root, candidate)
raise Exception("Can't find mounted %s cgroups." % hierarchy)
def find_cgroup_filename_pattern(mountpoints, container_id):
# We try with different cgroups so that it works even if only one is properly working
for mountpoint in mountpoints.itervalues():
stat_file_path_lxc = os.path.join(mountpoint, "lxc")
stat_file_path_docker = os.path.join(mountpoint, "docker")
stat_file_path_coreos = os.path.join(mountpoint, "system.slice")
stat_file_path_kubernetes = os.path.join(mountpoint, container_id)
stat_file_path_kubernetes_docker = os.path.join(mountpoint, "system", "docker", container_id)
stat_file_path_docker_daemon = os.path.join(mountpoint, "docker-daemon", "docker", container_id)
if os.path.exists(stat_file_path_lxc):
return os.path.join('%(mountpoint)s/lxc/%(id)s/%(file)s')
elif os.path.exists(stat_file_path_docker):
return os.path.join('%(mountpoint)s/docker/%(id)s/%(file)s')
elif os.path.exists(stat_file_path_coreos):
return os.path.join('%(mountpoint)s/system.slice/docker-%(id)s.scope/%(file)s')
elif os.path.exists(stat_file_path_kubernetes):
return os.path.join('%(mountpoint)s/%(id)s/%(file)s')
elif os.path.exists(stat_file_path_kubernetes_docker):
return os.path.join('%(mountpoint)s/system/docker/%(id)s/%(file)s')
elif os.path.exists(stat_file_path_docker_daemon):
return os.path.join('%(mountpoint)s/docker-daemon/docker/%(id)s/%(file)s')
raise MountException("Cannot find Docker cgroup directory. Be sure your system is supported.")
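# The returned value is a %-format template; callers are expected to expand it
# like `pattern % {'mountpoint': mp, 'id': container_id, 'file': 'memory.stat'}`
# (usage inferred from the placeholders above; shown for illustration).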
def image_tag_extractor(entity, key):
if "Image" in entity:
split = entity["Image"].split(":")
if len(split) <= key:
return None
elif len(split) > 2:
# if the repo is in the image name and has the form 'docker.clearbit:5000'
# the split will be like [repo_url, repo_port/image_name, image_tag]. Let's avoid that
split = [':'.join(split[:-1]), split[-1]]
return [split[key]]
if "RepoTags" in entity:
splits = [el.split(":") for el in entity["RepoTags"]]
tags = set()
for split in splits:
if len(split) > 2:
split = [':'.join(split[:-1]), split[-1]]
if len(split) > key:
tags.add(split[key])
if len(tags) > 0:
return list(tags)
return None
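# Worked example (hypothetical payload): when the image name embeds a registry
# host:port, the split is re-joined so the port is not mistaken for a tag.
def _example_image_tag():
    entity = {'Image': 'docker.clearbit:5000/web:v2'}
    name = image_tag_extractor(entity, 0)   # -> ['docker.clearbit:5000/web']
    tag = image_tag_extractor(entity, 1)    # -> ['v2']
    return name, tag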
def container_name_extractor(co):
names = co.get('Names', [])
if names is not None:
# we sort the list to make sure that a docker API update introducing
# new names with a single "/" won't make us report dups.
names = sorted(names)
for name in names:
# the leading "/" is legit, if there's another one it means the name is actually an alias
if name.count('/') <= 1:
return [str(name).lstrip('/')]
return co.get('Id')[:11]
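# Worked example (hypothetical payload): a name containing a second '/' is an
# alias and is skipped; the legitimate leading '/' is stripped.
def _example_container_name():
    co = {'Names': ['/compose_web_1', '/parent/alias'],
          'Id': 'deadbeefcafe0123456789ab'}
    return container_name_extractor(co)  # -> ['compose_web_1']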
def get_hostname():
"""Return the `Name` param from `docker info` to use as the hostname"""
from config import get_confd_path, check_yaml, PathNotFound
confd_path = ''
try:
confd_path = get_confd_path()
except PathNotFound:
log.error("Couldn't find the check configuration folder, not using the docker hostname.")
return None
conf_path = os.path.join(confd_path, '%s.yaml' % CHECK_NAME)
if not os.path.exists(conf_path):
default_conf_path = os.path.join(confd_path, '%s.yaml.default' % CHECK_NAME)
if not os.path.exists(default_conf_path):
log.error("Couldn't find any configuration file for the docker check."
" Not using the docker hostname.")
return None
else:
conf_path = default_conf_path
check_config = check_yaml(conf_path)
init_config, instances = check_config.get('init_config', {}), check_config['instances']
init_config = {} if init_config is None else init_config
if len(instances) > 0:
set_docker_settings(init_config, instances[0])
return get_client().info().get("Name")
return None
| bsd-3-clause |
SaschaMester/delicium | third_party/mojo/src/mojo/public/third_party/markupsafe/__init__.py | 371 | 8205 | # -*- coding: utf-8 -*-
"""
markupsafe
~~~~~~~~~~
Implements a Markup string.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
from markupsafe._compat import text_type, string_types, int_types, \
unichr, PY2
__all__ = ['Markup', 'soft_unicode', 'escape', 'escape_silent']
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
class Markup(text_type):
r"""Marks a string as being safe for inclusion in HTML/XML output without
needing to be escaped. This implements the `__html__` interface a couple
of frameworks and web applications use. :class:`Markup` is a direct
subclass of `unicode` and provides all the methods of `unicode`, except that
it escapes arguments passed to it and always returns `Markup`.
The `escape` function returns markup objects so that double escaping can't
happen.
The constructor of the :class:`Markup` class can be used for three
different things: when passed a unicode object it's assumed to be safe,
when passed an object with an HTML representation (has an `__html__`
method) that representation is used, otherwise the object passed is
converted into a unicode string and then assumed to be safe:
>>> Markup("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
>>> class Foo(object):
... def __html__(self):
... return '<a href="#">foo</a>'
...
>>> Markup(Foo())
Markup(u'<a href="#">foo</a>')
If you want the passed object to always be treated as unsafe, you can use the
:meth:`escape` classmethod to create a :class:`Markup` object:
>>> Markup.escape("Hello <em>World</em>!")
Markup(u'Hello <em>World</em>!')
Operations on a markup string are markup aware which means that all
arguments are passed through the :func:`escape` function:
>>> em = Markup("<em>%s</em>")
>>> em % "foo & bar"
Markup(u'<em>foo & bar</em>')
>>> strong = Markup("<strong>%(text)s</strong>")
>>> strong % {'text': '<blink>hacker here</blink>'}
Markup(u'<strong><blink>hacker here</blink></strong>')
>>> Markup("<em>Hello</em> ") + "<foo>"
Markup(u'<em>Hello</em> <foo>')
"""
__slots__ = ()
def __new__(cls, base=u'', encoding=None, errors='strict'):
if hasattr(base, '__html__'):
base = base.__html__()
if encoding is None:
return text_type.__new__(cls, base)
return text_type.__new__(cls, base, encoding, errors)
def __html__(self):
return self
def __add__(self, other):
if isinstance(other, string_types) or hasattr(other, '__html__'):
return self.__class__(super(Markup, self).__add__(self.escape(other)))
return NotImplemented
def __radd__(self, other):
if hasattr(other, '__html__') or isinstance(other, string_types):
return self.escape(other).__add__(self)
return NotImplemented
def __mul__(self, num):
if isinstance(num, int_types):
return self.__class__(text_type.__mul__(self, num))
return NotImplemented
__rmul__ = __mul__
def __mod__(self, arg):
if isinstance(arg, tuple):
arg = tuple(_MarkupEscapeHelper(x, self.escape) for x in arg)
else:
arg = _MarkupEscapeHelper(arg, self.escape)
return self.__class__(text_type.__mod__(self, arg))
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
text_type.__repr__(self)
)
def join(self, seq):
return self.__class__(text_type.join(self, map(self.escape, seq)))
join.__doc__ = text_type.join.__doc__
def split(self, *args, **kwargs):
return list(map(self.__class__, text_type.split(self, *args, **kwargs)))
split.__doc__ = text_type.split.__doc__
def rsplit(self, *args, **kwargs):
return list(map(self.__class__, text_type.rsplit(self, *args, **kwargs)))
rsplit.__doc__ = text_type.rsplit.__doc__
def splitlines(self, *args, **kwargs):
return list(map(self.__class__, text_type.splitlines(self, *args, **kwargs)))
splitlines.__doc__ = text_type.splitlines.__doc__
def unescape(self):
r"""Unescape markup again into an text_type string. This also resolves
known HTML4 and XHTML entities:
>>> Markup("Main » <em>About</em>").unescape()
u'Main \xbb <em>About</em>'
"""
from markupsafe._constants import HTML_ENTITIES
def handle_match(m):
name = m.group(1)
if name in HTML_ENTITIES:
return unichr(HTML_ENTITIES[name])
try:
if name[:2] in ('#x', '#X'):
return unichr(int(name[2:], 16))
elif name.startswith('#'):
return unichr(int(name[1:]))
except ValueError:
pass
return u''
return _entity_re.sub(handle_match, text_type(self))
def striptags(self):
r"""Unescape markup into an text_type string and strip all tags. This
also resolves known HTML4 and XHTML entities. Whitespace is
normalized to one:
>>> Markup("Main » <em>About</em>").striptags()
u'Main \xbb About'
"""
stripped = u' '.join(_striptags_re.sub('', self).split())
return Markup(stripped).unescape()
@classmethod
def escape(cls, s):
"""Escape the string. Works like :func:`escape` with the difference
that for subclasses of :class:`Markup` this function would return the
correct subclass.
"""
rv = escape(s)
if rv.__class__ is not cls:
return cls(rv)
return rv
def make_wrapper(name):
orig = getattr(text_type, name)
def func(self, *args, **kwargs):
args = _escape_argspec(list(args), enumerate(args), self.escape)
#_escape_argspec(kwargs, kwargs.iteritems(), None)
return self.__class__(orig(self, *args, **kwargs))
func.__name__ = orig.__name__
func.__doc__ = orig.__doc__
return func
for method in '__getitem__', 'capitalize', \
'title', 'lower', 'upper', 'replace', 'ljust', \
'rjust', 'lstrip', 'rstrip', 'center', 'strip', \
'translate', 'expandtabs', 'swapcase', 'zfill':
locals()[method] = make_wrapper(method)
# new in python 2.5
if hasattr(text_type, 'partition'):
def partition(self, sep):
return tuple(map(self.__class__,
text_type.partition(self, self.escape(sep))))
def rpartition(self, sep):
return tuple(map(self.__class__,
text_type.rpartition(self, self.escape(sep))))
# new in python 2.6
if hasattr(text_type, 'format'):
format = make_wrapper('format')
# not in python 3
if hasattr(text_type, '__getslice__'):
__getslice__ = make_wrapper('__getslice__')
del method, make_wrapper
def _escape_argspec(obj, iterable, escape):
"""Helper for various string-wrapped functions."""
for key, value in iterable:
if hasattr(value, '__html__') or isinstance(value, string_types):
obj[key] = escape(value)
return obj
class _MarkupEscapeHelper(object):
"""Helper for Markup.__mod__"""
def __init__(self, obj, escape):
self.obj = obj
self.escape = escape
__getitem__ = lambda s, x: _MarkupEscapeHelper(s.obj[x], s.escape)
__unicode__ = __str__ = lambda s: text_type(s.escape(s.obj))
__repr__ = lambda s: str(s.escape(repr(s.obj)))
__int__ = lambda s: int(s.obj)
__float__ = lambda s: float(s.obj)
# we have to import it down here as the speedups and native
# modules imports the markup type which is define above.
try:
from markupsafe._speedups import escape, escape_silent, soft_unicode
except ImportError:
from markupsafe._native import escape, escape_silent, soft_unicode
if not PY2:
soft_str = soft_unicode
__all__.append('soft_str')
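# Round-trip sketch mirroring the doctests above: %-formatting escapes its
# arguments, escape() passes Markup through unchanged (no double escaping),
# and striptags() recovers the plain text.
def _example_markup_roundtrip():
    rendered = Markup(u'<em>%s</em>') % u'foo & bar'
    assert escape(rendered) == rendered  # already safe, left untouched
    return rendered.striptags()          # u'foo & bar'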
| bsd-3-clause |
iRGBit/QGIS | python/plugins/processing/gui/EditScriptAction.py | 23 | 2221 | # -*- coding: utf-8 -*-
"""
***************************************************************************
EditScriptAction.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from processing.gui.ContextAction import ContextAction
from processing.gui.ScriptEditorDialog import ScriptEditorDialog
from processing.algs.r.RAlgorithm import RAlgorithm
from processing.script.ScriptAlgorithm import ScriptAlgorithm
class EditScriptAction(ContextAction):
SCRIPT_PYTHON = 0
SCRIPT_R = 1
def __init__(self, scriptType):
self.name = self.tr('Edit script', 'EditScriptAction')
self.scriptType = scriptType
def isEnabled(self):
if self.scriptType == ScriptEditorDialog.SCRIPT_PYTHON:
return isinstance(self.alg, ScriptAlgorithm) and self.alg.allowEdit
elif self.scriptType == ScriptEditorDialog.SCRIPT_R:
return isinstance(self.alg, RAlgorithm)
def execute(self):
dlg = ScriptEditorDialog(self.scriptType, self.alg)
dlg.show()
dlg.exec_()
if dlg.update:
if self.scriptType == ScriptEditorDialog.SCRIPT_PYTHON:
self.toolbox.updateProvider('script')
elif self.scriptType == ScriptEditorDialog.SCRIPT_R:
self.toolbox.updateProvider('r')
| gpl-2.0 |
nishant8BITS/node-gyp | gyp/pylib/gyp/xcode_emulation.py | 241 | 63805 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import copy
import gyp.common
import os
import os.path
import re
import shlex
import subprocess
import sys
import tempfile
from gyp.common import GypError
# Populated lazily by XcodeVersion, for efficiency, and to fix an issue when
# "xcodebuild" is called too quickly (it has been found to return incorrect
# version number).
XCODE_VERSION_CACHE = None
# Populated lazily by GetXcodeArchsDefault, to an |XcodeArchsDefault| instance
# corresponding to the installed version of Xcode.
XCODE_ARCHS_DEFAULT_CACHE = None
def XcodeArchsVariableMapping(archs, archs_including_64_bit=None):
"""Constructs a dictionary with expansion for $(ARCHS_STANDARD) variable,
and optionally for $(ARCHS_STANDARD_INCLUDING_64_BIT)."""
mapping = {'$(ARCHS_STANDARD)': archs}
if archs_including_64_bit:
mapping['$(ARCHS_STANDARD_INCLUDING_64_BIT)'] = archs_including_64_bit
return mapping
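# Illustrative expansion (the architecture lists are examples, not the real
# defaults):
#   XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']) ==
#       {'$(ARCHS_STANDARD)': ['i386'],
#        '$(ARCHS_STANDARD_INCLUDING_64_BIT)': ['i386', 'x86_64']}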
class XcodeArchsDefault(object):
"""A class to resolve ARCHS variable from xcode_settings, resolving Xcode
macros and implementing filtering by VALID_ARCHS. The expansion of macros
depends on the SDKROOT used ("macosx", "iphoneos", "iphonesimulator") and
on the version of Xcode.
"""
# Match variable like $(ARCHS_STANDARD).
variable_pattern = re.compile(r'\$\([a-zA-Z_][a-zA-Z0-9_]*\)$')
def __init__(self, default, mac, iphonesimulator, iphoneos):
self._default = (default,)
self._archs = {'mac': mac, 'ios': iphoneos, 'iossim': iphonesimulator}
def _VariableMapping(self, sdkroot):
"""Returns the dictionary of variable mapping depending on the SDKROOT."""
sdkroot = sdkroot.lower()
if 'iphoneos' in sdkroot:
return self._archs['ios']
elif 'iphonesimulator' in sdkroot:
return self._archs['iossim']
else:
return self._archs['mac']
def _ExpandArchs(self, archs, sdkroot):
"""Expands variables references in ARCHS, and remove duplicates."""
variable_mapping = self._VariableMapping(sdkroot)
expanded_archs = []
for arch in archs:
if self.variable_pattern.match(arch):
variable = arch
try:
variable_expansion = variable_mapping[variable]
for arch in variable_expansion:
if arch not in expanded_archs:
expanded_archs.append(arch)
except KeyError:
print 'Warning: Ignoring unsupported variable "%s".' % variable
elif arch not in expanded_archs:
expanded_archs.append(arch)
return expanded_archs
def ActiveArchs(self, archs, valid_archs, sdkroot):
"""Expands variables references in ARCHS, and filter by VALID_ARCHS if it
is defined (if not set, Xcode accept any value in ARCHS, otherwise, only
values present in VALID_ARCHS are kept)."""
expanded_archs = self._ExpandArchs(archs or self._default, sdkroot or '')
if valid_archs:
filtered_archs = []
for arch in expanded_archs:
if arch in valid_archs:
filtered_archs.append(arch)
expanded_archs = filtered_archs
return expanded_archs
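# Usage sketch (hypothetical mappings, not the cached defaults):
# $(ARCHS_STANDARD) expands according to the SDKROOT, then VALID_ARCHS
# filters the expansion.
def _example_active_archs():
  default = XcodeArchsDefault(
      '$(ARCHS_STANDARD)',
      XcodeArchsVariableMapping(['x86_64']),
      XcodeArchsVariableMapping(['i386', 'x86_64']),
      XcodeArchsVariableMapping(['armv7', 'arm64']))
  return default.ActiveArchs(
      ['$(ARCHS_STANDARD)'], ['arm64'], 'iphoneos')  # -> ['arm64']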
def GetXcodeArchsDefault():
"""Returns the |XcodeArchsDefault| object to use to expand ARCHS for the
installed version of Xcode. The default values used by Xcode for ARCHS
and the expansion of the variables depend on the version of Xcode used.
All versions before Xcode 5.0, and all versions from Xcode 5.1 onward, use
$(ARCHS_STANDARD) if ARCHS is unset, while Xcode 5.0 to 5.0.2 use
$(ARCHS_STANDARD_INCLUDING_64_BIT). That variable was added in Xcode 5.0
and deprecated in Xcode 5.1.
For the "macosx" SDKROOT, every version starting with Xcode 5.0 includes the
64-bit architecture as part of $(ARCHS_STANDARD) and defaults to building only it.
For "iphoneos" and "iphonesimulator" SDKROOT, 64-bit architectures are part
of $(ARCHS_STANDARD_INCLUDING_64_BIT) from Xcode 5.0. From Xcode 5.1, they
are also part of $(ARCHS_STANDARD).
All those rules are coded in the construction of the |XcodeArchsDefault|
object to use depending on the version of Xcode detected. The object is
cached for performance reasons."""
global XCODE_ARCHS_DEFAULT_CACHE
if XCODE_ARCHS_DEFAULT_CACHE:
return XCODE_ARCHS_DEFAULT_CACHE
xcode_version, _ = XcodeVersion()
if xcode_version < '0500':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['i386']),
XcodeArchsVariableMapping(['armv7']))
elif xcode_version < '0510':
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD_INCLUDING_64_BIT)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s'],
['armv7', 'armv7s', 'arm64']))
else:
XCODE_ARCHS_DEFAULT_CACHE = XcodeArchsDefault(
'$(ARCHS_STANDARD)',
XcodeArchsVariableMapping(['x86_64'], ['x86_64']),
XcodeArchsVariableMapping(['i386', 'x86_64'], ['i386', 'x86_64']),
XcodeArchsVariableMapping(
['armv7', 'armv7s', 'arm64'],
['armv7', 'armv7s', 'arm64']))
return XCODE_ARCHS_DEFAULT_CACHE
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
_sdk_root_cache = {}
# Populated lazily by GetExtraPlistItems(). Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_plist_cache = {}
# Populated lazily by GetIOSPostbuilds. Shared by all XcodeSettings, so
# cached at class-level for efficiency.
_codesigning_key_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
self._ConvertConditionalKeys(configname)
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _ConvertConditionalKeys(self, configname):
"""Converts or warns on conditional keys. Xcode supports conditional keys,
such as CODE_SIGN_IDENTITY[sdk=iphoneos*]. This is a partial implementation
with some keys converted while the rest force a warning."""
settings = self.xcode_settings[configname]
conditional_keys = [key for key in settings if key.endswith(']')]
for key in conditional_keys:
# If you need more, speak up at http://crbug.com/122592
if key.endswith("[sdk=iphoneos*]"):
if configname.endswith("iphoneos"):
new_key = key.split("[")[0]
settings[new_key] = settings[key]
else:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
del settings[key]
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def IsBinaryOutputFormat(self, configname):
default = "binary" if self.isIOS else "xml"
format = self.xcode_settings[configname].get('INFOPLIST_OUTPUT_FORMAT',
default)
return format == "binary"
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def _IsIosAppExtension(self):
return int(self.spec.get('ios_app_extension', 0)) != 0
def _IsIosWatchKitExtension(self):
return int(self.spec.get('ios_watchkit_extension', 0)) != 0
def _IsIosWatchApp(self):
return int(self.spec.get('ios_watch_app', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
if self._IsIosAppExtension() or self._IsIosWatchKitExtension():
return '.' + self.spec.get('product_extension', 'appex')
else:
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsIosAppExtension():
assert self._IsBundle(), ('ios_app_extension flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.app-extension'
if self._IsIosWatchKitExtension():
assert self._IsBundle(), ('ios_watchkit_extension flag requires '
'mac_bundle (target %s)' % self.spec['target_name'])
return 'com.apple.product-type.watchkit-extension'
if self._IsIosWatchApp():
assert self._IsBundle(), ('ios_watch_app flag requires mac_bundle '
'(target %s)' % self.spec['target_name'])
return 'com.apple.product-type.application.watchapp'
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
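# E.g. a mac_bundle executable maps to 'com.apple.product-type.application',
# while a non-bundled static_library maps to
# 'com.apple.product-type.library.static'.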
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
    if self.spec['type'] in ('shared_library',) or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
config_settings = self.xcode_settings[configname]
xcode_archs_default = GetXcodeArchsDefault()
return xcode_archs_default.ActiveArchs(
config_settings.get('ARCHS'),
config_settings.get('VALID_ARCHS'),
config_settings.get('SDKROOT'))
def _GetSdkVersionInfoItem(self, sdk, infoitem):
# xcodebuild requires Xcode and can't run on Command Line Tools-only
# systems from 10.7 onward.
# Since the CLT has no SDK paths anyway, returning None is the
# most sensible route and should still do the right thing.
try:
return GetStdout(['xcodebuild', '-version', '-sdk', sdk, infoitem])
except:
pass
def _SdkRoot(self, configname):
if configname is None:
configname = self.configname
return self.GetPerConfigSetting('SDKROOT', configname, default='')
def _SdkPath(self, configname=None):
sdk_root = self._SdkRoot(configname)
if sdk_root.startswith('/'):
return sdk_root
return self._XcodeSdkPath(sdk_root)
def _XcodeSdkPath(self, sdk_root):
if sdk_root not in XcodeSettings._sdk_path_cache:
sdk_path = self._GetSdkVersionInfoItem(sdk_root, 'Path')
XcodeSettings._sdk_path_cache[sdk_root] = sdk_path
if sdk_root:
XcodeSettings._sdk_root_cache[sdk_path] = sdk_root
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
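  # Illustrative outcome (settings assumed for the example, not from a real
  # project): MACOSX_DEPLOYMENT_TARGET='10.8' appends
  # '-mmacosx-version-min=10.8'; IPHONEOS_DEPLOYMENT_TARGET='7.0' appends
  # '-miphoneos-version-min=7.0', or '-mios-simulator-version-min=7.0' when
  # the SDK path basename starts with 'iphonesimulator'.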
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
    # These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings() and sdk_root:
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
if sdk_root:
framework_root = sdk_root
else:
framework_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', framework_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11, so that c++0x can still be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def _AddObjectiveCMissingPropertySynthesisFlags(self, flags):
if self._Test('CLANG_WARN_OBJC_MISSING_PROPERTY_SYNTHESIS',
'YES', default='NO'):
flags.append('-Wobjc-missing-property-synthesis')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
self._AddObjectiveCMissingPropertySynthesisFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = r'(\S+)'
WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
        product_dir: The directory where products such as static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
            current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings() and self._SdkPath():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
assert self.configname
archs = self.GetActiveArchs(self.configname)
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name and self.spec['type'] != 'loadable_module':
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
sdk_root = self._SdkPath()
if not sdk_root:
sdk_root = ''
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
is_extension = self._IsIosAppExtension() or self._IsIosWatchKitExtension()
if sdk_root and is_extension:
# Adds the link flags for extensions. These flags are common for all
# extensions and provide loader and main function.
# These flags reflect the compilation options used by xcode to compile
# extensions.
ldflags.append('-lpkstart')
ldflags.append(sdk_root +
'/System/Library/PrivateFrameworks/PlugInKit.framework/PlugInKit')
ldflags.append('-fapplication-extension')
ldflags.append('-Xlinker -rpath '
'-Xlinker @executable_path/../../Frameworks')
self._Appendf(ldflags, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerConfigSetting(self, setting, configname, default=None):
if configname in self.xcode_settings:
return self.xcode_settings[configname].get(setting, default)
else:
return self.GetPerTargetSetting(setting, default)
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
is_first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if is_first_pass:
result = self.xcode_settings[configname].get(setting, None)
is_first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, self.spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self.spec['type'] == 'loadable_module' and self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def _GetTargetPostbuilds(self, configname, output, output_binary,
quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _GetIOSPostbuilds(self, configname, output_binary):
"""Return a shell command to codesign the iOS output binary so it can
be deployed to a device. This should be run as the very last step of the
build."""
if not (self.isIOS and self.spec['type'] == 'executable'):
return []
settings = self.xcode_settings[configname]
key = self._GetIOSCodeSignIdentityKey(settings)
if not key:
return []
# Warn for any unimplemented signing xcode keys.
unimpl = ['OTHER_CODE_SIGN_FLAGS']
unimpl = set(unimpl) & set(self.xcode_settings[configname].keys())
if unimpl:
print 'Warning: Some codesign keys not implemented, ignoring: %s' % (
', '.join(sorted(unimpl)))
return ['%s code-sign-bundle "%s" "%s" "%s" "%s"' % (
os.path.join('${TARGET_BUILD_DIR}', 'gyp-mac-tool'), key,
settings.get('CODE_SIGN_RESOURCE_RULES_PATH', ''),
settings.get('CODE_SIGN_ENTITLEMENTS', ''),
settings.get('PROVISIONING_PROFILE', ''))
]
def _GetIOSCodeSignIdentityKey(self, settings):
identity = settings.get('CODE_SIGN_IDENTITY')
if not identity:
return None
if identity not in XcodeSettings._codesigning_key_cache:
output = subprocess.check_output(
['security', 'find-identity', '-p', 'codesigning', '-v'])
for line in output.splitlines():
if identity in line:
fingerprint = line.split()[1]
cache = XcodeSettings._codesigning_key_cache
assert identity not in cache or fingerprint == cache[identity], (
"Multiple codesigning fingerprints for identity: %s" % identity)
XcodeSettings._codesigning_key_cache[identity] = fingerprint
return XcodeSettings._codesigning_key_cache.get(identity, '')
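  # Illustrative `security find-identity -p codesigning -v` line (output
  # shape assumed, not captured from a real machine):
  #   1) 89B2AC0123456789ABCDEF0123456789ABCDF00D "iPhone Developer: Jane Doe (ABCD1234)"
  # line.split()[1] is the hex fingerprint that gets cached above.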
def AddImplicitPostbuilds(self, configname, output, output_binary,
postbuilds=[], quiet=False):
"""Returns a list of shell commands that should run before and after
|postbuilds|."""
assert output_binary is not None
pre = self._GetTargetPostbuilds(configname, output, output_binary, quiet)
post = self._GetIOSPostbuilds(configname, output_binary)
return pre + postbuilds + post
def _AdjustLibrary(self, library, config_name=None):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
sdk_root = self._SdkPath(config_name)
if not sdk_root:
sdk_root = ''
return l.replace('$(SDKROOT)', sdk_root)
def AdjustLibraries(self, libraries, config_name=None):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [self._AdjustLibrary(library, config_name)
for library in libraries]
return libraries
def _BuildMachineOSBuild(self):
return GetStdout(['sw_vers', '-buildVersion'])
def _XcodeIOSDeviceFamily(self, configname):
family = self.xcode_settings[configname].get('TARGETED_DEVICE_FAMILY', '1')
return [int(x) for x in family.split(',')]
def GetExtraPlistItems(self, configname=None):
"""Returns a dictionary with extra items to insert into Info.plist."""
if configname not in XcodeSettings._plist_cache:
cache = {}
cache['BuildMachineOSBuild'] = self._BuildMachineOSBuild()
xcode, xcode_build = XcodeVersion()
cache['DTXcode'] = xcode
cache['DTXcodeBuild'] = xcode_build
sdk_root = self._SdkRoot(configname)
if not sdk_root:
sdk_root = self._DefaultSdkRoot()
cache['DTSDKName'] = sdk_root
if xcode >= '0430':
cache['DTSDKBuild'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductBuildVersion')
else:
cache['DTSDKBuild'] = cache['BuildMachineOSBuild']
if self.isIOS:
cache['DTPlatformName'] = cache['DTSDKName']
if configname.endswith("iphoneos"):
cache['DTPlatformVersion'] = self._GetSdkVersionInfoItem(
sdk_root, 'ProductVersion')
cache['CFBundleSupportedPlatforms'] = ['iPhoneOS']
else:
cache['CFBundleSupportedPlatforms'] = ['iPhoneSimulator']
XcodeSettings._plist_cache[configname] = cache
# Include extra plist items that are per-target, not per global
# XcodeSettings.
items = dict(XcodeSettings._plist_cache[configname])
if self.isIOS:
items['UIDeviceFamily'] = self._XcodeIOSDeviceFamily(configname)
return items
def _DefaultSdkRoot(self):
"""Returns the default SDKROOT to use.
Prior to version 5.0.0, if SDKROOT was not explicitly set in the Xcode
project, then the environment variable was empty. Starting with this
version, Xcode uses the name of the newest SDK installed.
"""
xcode_version, xcode_build = XcodeVersion()
if xcode_version < '0500':
return ''
default_sdk_path = self._XcodeSdkPath('')
default_sdk_root = XcodeSettings._sdk_root_cache.get(default_sdk_path)
if default_sdk_root:
return default_sdk_root
try:
all_sdks = GetStdout(['xcodebuild', '-showsdks'])
except:
# If xcodebuild fails, there will be no valid SDKs
return ''
for line in all_sdks.splitlines():
items = line.split()
if len(items) >= 3 and items[-2] == '-sdk':
sdk_root = items[-1]
sdk_path = self._XcodeSdkPath(sdk_root)
if sdk_path == default_sdk_path:
return sdk_root
return ''
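  # Illustrative `xcodebuild -showsdks` line (output shape assumed):
  #   OS X 10.9                      -sdk macosx10.9
  # items[-2] == '-sdk' marks the token and items[-1] is the candidate sdk
  # root, whose resolved path is compared against the default SDK path above.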
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |compilable[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
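# Illustrative sketch (not part of gyp): how a generator might drive
# MacPrefixHeader. The identity path converters below are assumptions made
# only for this example; real generators map gyp-relative paths into the
# build directory.
def _ExamplePrefixHeaderUsage(xcode_settings):
  prefix = MacPrefixHeader(
      xcode_settings,
      gyp_path_to_build_path=lambda p: p,
      gyp_path_to_build_output=lambda p, lang: p + '.' + lang)
  # One precompile command per language; object files then pick up
  # '-include <gch>' through GetInclude(lang).
  for gch, lang_flag, lang, header in prefix.GetPchBuildCommands():
    print 'precompile %s %s -> %s' % (lang_flag, header, gch)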
def XcodeVersion():
"""Returns a tuple of version and build version of installed Xcode."""
# `xcodebuild -version` output looks like
# Xcode 4.6.3
# Build version 4H1503
# or like
# Xcode 3.2.6
# Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
# BuildVersion: 10M2518
# Convert that to '0463', '4H1503'.
global XCODE_VERSION_CACHE
if XCODE_VERSION_CACHE:
return XCODE_VERSION_CACHE
try:
version_list = GetStdout(['xcodebuild', '-version']).splitlines()
# In some circumstances xcodebuild exits 0 but doesn't return
# the right results; for example, a user on 10.7 or 10.8 with
# a bogus path set via xcode-select
# In that case this may be a CLT-only install so fall back to
# checking that version.
if len(version_list) < 2:
raise GypError("xcodebuild returned unexpected results")
except:
version = CLTVersion()
if version:
version = re.match(r'(\d\.\d\.?\d*)', version).groups()[0]
else:
raise GypError("No Xcode or CLT version detected!")
# The CLT has no build information, so we return an empty string.
version_list = [version, '']
version = version_list[0]
build = version_list[-1]
# Be careful to convert "4.2" to "0420":
version = version.split()[-1].replace('.', '')
version = (version + '0' * (3 - len(version))).zfill(4)
if build:
build = build.split()[-1]
XCODE_VERSION_CACHE = (version, build)
return XCODE_VERSION_CACHE
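# Worked example of the padding rule above (inputs assumed for
# illustration): 'Xcode 4.2' -> token '4.2' -> '42' -> '0420';
# 'Xcode 4.6.3' -> '463' -> '0463'. Build numbers like '4H1503' pass
# through split()[-1] unchanged.
def _ExampleVersionPadding():
  for raw, expected in (('4.2', '0420'), ('4.6.3', '0463')):
    v = raw.replace('.', '')
    v = (v + '0' * (3 - len(v))).zfill(4)
    assert v == expected, (raw, v)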
# This function ported from the logic in Homebrew's CLT version check
def CLTVersion():
"""Returns the version of command-line tools from pkgutil."""
# pkgutil output looks like
# package-id: com.apple.pkg.CLTools_Executables
# version: 5.0.1.0.1.1382131676
# volume: /
# location: /
# install-time: 1382544035
# groups: com.apple.FindSystemFiles.pkg-group com.apple.DevToolsBoth.pkg-group com.apple.DevToolsNonRelocatableShared.pkg-group
STANDALONE_PKG_ID = "com.apple.pkg.DeveloperToolsCLILeo"
FROM_XCODE_PKG_ID = "com.apple.pkg.DeveloperToolsCLI"
MAVERICKS_PKG_ID = "com.apple.pkg.CLTools_Executables"
regex = re.compile('version: (?P<version>.+)')
for key in [MAVERICKS_PKG_ID, STANDALONE_PKG_ID, FROM_XCODE_PKG_ID]:
try:
output = GetStdout(['/usr/sbin/pkgutil', '--pkg-info', key])
return re.search(regex, output).groupdict()['version']
except:
continue
def GetStdout(cmdlist):
"""Returns the content of standard output returned by invoking |cmdlist|.
  Raises |GypError| if the command returns with a non-zero return code."""
job = subprocess.Popen(cmdlist, stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running %s' % (job.returncode, cmdlist[0]))
return out.rstrip('\n')
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
  just a single file. Bundle rules not only produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = os.path.splitext(output)[0] + '.nib'
# Compiled storyboard files are referred to by .storyboardc.
if output.endswith('.storyboard'):
output = os.path.splitext(output)[0] + '.storyboardc'
yield output, res
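# Illustrative mapping (inputs assumed): with a resource folder of
# 'App.app/Contents/Resources', the .lproj/.xib/.storyboard rules above
# yield
#   'images/icon.png'     -> 'App.app/Contents/Resources/icon.png'
#   'en.lproj/Main.xib'   -> 'App.app/Contents/Resources/en.lproj/Main.nib'
#   'ui/Intro.storyboard' -> 'App.app/Contents/Resources/Intro.storyboardc'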
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
      gyp_path_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
  # These are filled in on an as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerConfigSetting('SDKROOT', configuration):
env['SDKROOT'] = xcode_settings._SdkPath(configuration)
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if XcodeVersion() >= '0500' and not env.get('SDKROOT'):
sdk_root = xcode_settings._SdkRoot(configuration)
if not sdk_root:
sdk_root = xcode_settings._XcodeSdkPath('')
if sdk_root is None:
sdk_root = ''
env['SDKROOT'] = sdk_root
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
    # `match` is a (to_replace, variable) tuple; check the variable name and
    # report the full reference so the message concatenation is valid.
    assert '$(' not in variable, '$($(FOO)) variables not supported: ' + to_replace
str = str.replace(to_replace, '${' + variable + '}')
return str
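# Worked example (inputs assumed for illustration): all three reference
# styles collapse to the ${FOO} form.
def _ExampleNormalizeEnvVarReferences():
  assert _NormalizeEnvVarReferences('$FOO $(BAR) ${BAZ}') == \
      '${FOO} ${BAR} ${BAZ}'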
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
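# Worked example (inputs assumed): expansions are (key, value) pairs;
# applying them in reverse lets a later entry reference an earlier one.
def _ExampleExpandEnvVars():
  expansions = [('SRCROOT', '/src'), ('OUT', '${SRCROOT}/out')]
  assert ExpandEnvVars('$(OUT)/app', expansions) == '/src/out/app'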
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
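# Worked example (inputs assumed): BUILT_PRODUCTS_DIR refers to SRCROOT, so
# the contract above guarantees SRCROOT comes first in the evaluation order.
def _ExampleEnvVarTopSort():
  env = {'SRCROOT': '/src',
         'BUILT_PRODUCTS_DIR': '${SRCROOT}/out'}
  assert _TopologicallySortedEnvVarKeys(env) == [
      'SRCROOT', 'BUILT_PRODUCTS_DIR']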
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
def _HasIOSTarget(targets):
"""Returns true if any target contains the iOS specific key
IPHONEOS_DEPLOYMENT_TARGET."""
for target_dict in targets.values():
for config in target_dict['configurations'].values():
if config.get('xcode_settings', {}).get('IPHONEOS_DEPLOYMENT_TARGET'):
return True
return False
def _AddIOSDeviceConfigurations(targets):
"""Clone all targets and append -iphoneos to the name. Configure these targets
to build for iOS devices and use correct architectures for those builds."""
for target_dict in targets.itervalues():
toolset = target_dict['toolset']
configs = target_dict['configurations']
for config_name, config_dict in dict(configs).iteritems():
iphoneos_config_dict = copy.deepcopy(config_dict)
configs[config_name + '-iphoneos'] = iphoneos_config_dict
configs[config_name + '-iphonesimulator'] = config_dict
if toolset == 'target':
iphoneos_config_dict['xcode_settings']['SDKROOT'] = 'iphoneos'
return targets
def CloneConfigurationForDeviceAndEmulator(target_dicts):
"""If |target_dicts| contains any iOS targets, automatically create -iphoneos
targets for iOS device builds."""
if _HasIOSTarget(target_dicts):
return _AddIOSDeviceConfigurations(target_dicts)
return target_dicts
| mit |
alexbredo/ipfix-receiver | urllib3/filepost.py | 1009 | 2281 | import codecs
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
from .fields import RequestField
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
    Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def iter_field_objects(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts, and lists of
:class:`~urllib3.fields.RequestField`.
"""
if isinstance(fields, dict):
i = six.iteritems(fields)
else:
i = iter(fields)
for field in i:
if isinstance(field, RequestField):
yield field
else:
yield RequestField.from_tuples(*field)
def iter_fields(fields):
"""
.. deprecated:: 1.6
Iterate over fields.
The addition of :class:`~urllib3.fields.RequestField` makes this function
obsolete. Instead, use :func:`iter_field_objects`, which returns
:class:`~urllib3.fields.RequestField` objects.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, :class:`~urllib3.fields.RequestField`).
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for field in iter_field_objects(fields):
body.write(b('--%s\r\n' % (boundary)))
writer(body).write(field.render_headers())
data = field.data
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = str('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
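# Illustrative usage (field values assumed): a fixed boundary makes the
# output deterministic; normally choose_boundary() supplies a random one.
def _example_encode_multipart():
    body, content_type = encode_multipart_formdata(
        {'name': 'value'}, boundary='xxx')
    assert content_type == 'multipart/form-data; boundary=xxx'
    assert body.startswith(b'--xxx\r\n')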
| bsd-2-clause |
jbking/demo-appengine-django-golang | myproject/django/contrib/localflavor/ie/ie_counties.py | 503 | 1127 | """
Sources:
Irish Counties: http://en.wikipedia.org/wiki/Counties_of_Ireland
"""
from django.utils.translation import ugettext_lazy as _
IE_COUNTY_CHOICES = (
('antrim', _('Antrim')),
('armagh', _('Armagh')),
('carlow', _('Carlow')),
('cavan', _('Cavan')),
('clare', _('Clare')),
('cork', _('Cork')),
('derry', _('Derry')),
('donegal', _('Donegal')),
('down', _('Down')),
('dublin', _('Dublin')),
('fermanagh', _('Fermanagh')),
('galway', _('Galway')),
('kerry', _('Kerry')),
('kildare', _('Kildare')),
('kilkenny', _('Kilkenny')),
('laois', _('Laois')),
('leitrim', _('Leitrim')),
('limerick', _('Limerick')),
('longford', _('Longford')),
('louth', _('Louth')),
('mayo', _('Mayo')),
('meath', _('Meath')),
('monaghan', _('Monaghan')),
('offaly', _('Offaly')),
('roscommon', _('Roscommon')),
('sligo', _('Sligo')),
('tipperary', _('Tipperary')),
('tyrone', _('Tyrone')),
('waterford', _('Waterford')),
('westmeath', _('Westmeath')),
('wexford', _('Wexford')),
('wicklow', _('Wicklow')),
)
| mit |
CalebBell/ht | ht/conv_packed_bed.py | 1 | 8259 | # -*- coding: utf-8 -*-
'''Chemical Engineering Design Library (ChEDL). Utilities for process modeling.
Copyright (C) 2016, Caleb Bell <Caleb.Andrew.Bell@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.'''
from __future__ import division
__all__ = ['Nu_packed_bed_Gnielinski', 'Nu_Wakao_Kagei', 'Nu_Achenbach',
'Nu_KTA']
def Nu_packed_bed_Gnielinski(dp, voidage, vs, rho, mu, Pr, fa=None):
r'''Calculates Nusselt number of a fluid passing over a bed of particles
using a correlation shown in [3]_ and cited as from [1]_ and [2]_. Likely
the best available model as the author of [1]_ is the same as [2]_ and
[3]_.
.. math::
Nu = f_a Nu_{sphere}
.. math::
Nu_{sphere} = 2 + \sqrt{Nu_{m,lam}^2 + Nu_{m,turb}^2}
.. math::
Nu_{m,lam} = 0.664Re^{0.5} Pr^{1/3}
.. math::
Nu_{m,turb} = \frac{0.037Re^{0.8} Pr}{1 + 2.443Re^{-0.1}(Pr^{2/3} -1)}
.. math::
Re = \frac{\rho v_s d_p}{\mu \epsilon}
Parameters
----------
dp : float
Equivalent spherical particle diameter of packing [m]
voidage : float
Void fraction of bed packing [-]
vs : float
Superficial velocity of the fluid [m/s]
rho : float
Density of the fluid [kg/m^3]
mu : float
Viscosity of the fluid, [Pa*s]
Pr : float
        Prandtl number of the fluid [-]
    fa : float, optional
        Factor increasing heat transfer [-]
Returns
-------
Nu : float
Nusselt number for heat transfer to the packed bed [-]
Notes
-----
    `fa` is a factor describing how much more heat transfer occurs than would
    normally occur around one sphere. For spheres of the same size,
:math:`f_a = 1 + 1.5(1-\epsilon)`. For cylinders with l/d ratio of
0.24 < l/d < 1.2 use fa = 1.6. For cubes, use fa = 1.6 For Raschig rings,
use `fa` = 2.1 For Berl saddles, use `fa` = 2.3. fa is calculated with
the relationship for spheres if not provided.
Confirmed with experimental data for a range of :math:`1E-1 < Re <1,000`
and :math:`0.4 < Pr < 1000` for spheres. Limits are smaller for other
shapes.
Examples
--------
>>> Nu_packed_bed_Gnielinski(dp=8E-4, voidage=0.4, vs=1, rho=1E3, mu=1E-3, Pr=0.7)
61.37823202546954
References
----------
.. [1] Gnielinski, V. (1981) "Equations for the calculation of heat and
mass transfer during flow through stationary spherical packings at
moderate and high Peclet numbers". International Chemical Engineering
21 (3): 378-383
.. [2] Gnielinski, V. (1982) "Berechnung des Warmeund Stoffaustauschs in
durchstomten ruhenden Schuttungen". Verfahrenstechnik 16(1): 36-39
.. [3] Gnielinski, V. in G esellschaft, V. D. I., ed. VDI Heat Atlas.
2nd ed. 2010 edition. Berlin; New York: Springer, 2010.
'''
Re = rho*vs*dp/mu/voidage
Nu_lam = 0.664*Re**0.5*Pr**(1/3.)
Nu_turb = 0.037*Re**0.8*Pr/(1 + 2.443*Re**-0.1*(Pr**(2/3.)-1))
Nu_sphere = 2 + (Nu_lam**2 + Nu_turb**2)**0.5
if fa is None:
fa = 1.0 + 1.5*(1.0 - voidage)
return fa*Nu_sphere
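# Illustrative use of the shape factor `fa` (assumed scenario, using values
# quoted in the Notes above): for Raschig rings pass fa=2.1. Relative to the
# doctest above the result simply scales by 2.1/1.9, because the default fa
# for voidage=0.4 is 1 + 1.5*(1 - 0.4) = 1.9, giving roughly 67.84.
# >>> Nu_packed_bed_Gnielinski(dp=8E-4, voidage=0.4, vs=1, rho=1E3, mu=1E-3,
# ...                          Pr=0.7, fa=2.1)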
def Nu_Wakao_Kagei(Re, Pr):
r'''Calculates Nusselt number of a fluid passing over a bed of particles
using a correlation shown in [1]_ and also cited in the review of [2]_.
Relatively rough, as it has no dependence on voidage.
.. math::
Nu = 2 + 1.1Pr^{1/3}Re^{0.6}
Parameters
----------
Re : float
Reynolds number with pebble diameter as characteristic dimension, [-]
Pr : float
Prandtl number of the fluid []
Returns
-------
Nu : float
Nusselt number for heat transfer to the packed bed [-]
Notes
-----
    Fit for Re from 3 to 3000; a reasonable fit is claimed up to Re = 1E6.
Examples
--------
>>> Nu_Wakao_Kagei(2000, 0.7)
95.40641328041248
References
----------
.. [1] Wakao, Noriaki, and Seiichirō Kagei. Heat and Mass Transfer in
Packed Beds. Taylor & Francis, 1982.
.. [2] Abdulmohsin, Rahman S., and Muthanna H. Al-Dahhan. "Characteristics
of Convective Heat Transport in a Packed Pebble-Bed Reactor." Nuclear
Engineering and Design 284 (April 1, 2015): 143-52.
doi:10.1016/j.nucengdes.2014.11.041.
'''
return 2 + 1.1*Pr**(1/3.)*Re**0.6
def Nu_Achenbach(Re, Pr, voidage):
r'''Calculates Nusselt number of a fluid passing over a bed of particles
using a correlation shown in [1]_ and also cited in the review of [2]_.
.. math::
Nu = [(1.18Re^{0.58})^4 + (0.23\left(\frac{Re}{1-\epsilon}
\right)^{0.75})^4]^{0.25}
Parameters
----------
Re : float
Reynolds number with pebble diameter as characteristic dimension, [-]
Pr : float
Prandtl number of the fluid []
voidage : float
Void fraction of bed packing [-]
Returns
-------
Nu : float
Nusselt number for heat transfer to the packed bed [-]
Notes
-----
    Claimed valid for Re/ε < 7.7E5.
Developed with tests performed in a wind tunnel at conditions up to 30 bar.
Examples
--------
>>> Nu_Achenbach(2000, 0.7, 0.4)
117.70343608599121
References
----------
.. [1] Achenbach, E. "Heat and Flow Characteristics of Packed Beds."
Experimental Thermal and Fluid Science 10, no. 1 (January 1, 1995):
17-27. doi:10.1016/0894-1777(94)00077-L.
.. [2] Abdulmohsin, Rahman S., and Muthanna H. Al-Dahhan. "Characteristics
of Convective Heat Transport in a Packed Pebble-Bed Reactor." Nuclear
Engineering and Design 284 (April 1, 2015): 143-52.
doi:10.1016/j.nucengdes.2014.11.041.
'''
return ((1.18*Re**0.58)**4 + (0.23*(Re/(1-voidage))**0.75)**4)**0.25
def Nu_KTA(Re, Pr, voidage):
r'''Calculates Nusselt number of a fluid passing over a bed of particles
using a correlation shown in [1]_ and also cited in the review of [2]_.
.. math::
Nu = 1.27\frac{Pr^{1/3}}{\epsilon^{1.18}}Re^{0.36}
+ 0.033\frac{Pr^{0.5}}{\epsilon^{1.07}}Re^{0.86}
Parameters
----------
Re : float
Reynolds number with pebble diameter as characteristic dimension, [-]
Pr : float
Prandtl number of the fluid [-]
voidage : float
Void fraction of bed packing [-]
Returns
-------
Nu : float
Nusselt number for heat transfer to the packed bed [-]
Notes
-----
100 < Re < 1E5;
0.36 < ε < 0.42;
D/d > 20 with D as bed diameter, d as particle diameter;
H > 4d with H as bed height.
Examples
--------
>>> Nu_KTA(2000, 0.7, 0.4)
102.08516480718129
References
----------
.. [1] Reactor Core Design of High-Temperature Gas-Cooled Reactors Part 2:
Heat Transfer in Spherical Fuel Elements (June 1983).
http://www.kta-gs.de/e/standards/3100/3102_2_engl_1983_06.pdf
.. [2] Abdulmohsin, Rahman S., and Muthanna H. Al-Dahhan. "Characteristics
of Convective Heat Transport in a Packed Pebble-Bed Reactor." Nuclear
Engineering and Design 284 (April 1, 2015): 143-52.
doi:10.1016/j.nucengdes.2014.11.041.
'''
return (1.27*Pr**(1/3.)*Re**0.36/voidage**1.18
+ 0.033*Pr**0.5/voidage**1.07*Re**0.86)
| mit |
ashmanmode/TTSDNNRepo | src/frontend/merge_features.py | 2 | 2200 |
import numpy, sys
from io_funcs.binary_io import BinaryIOCollection
import logging
class MergeFeat(object):
def __init__(self, lab_dim = 481, feat_dim = 1):
self.logger = logging.getLogger("labels")
self.lab_dim = lab_dim
self.feat_dim = feat_dim
def merge_data(self, binary_label_file_list, new_feat_file_list, out_feat_file_list):
'''
        Merge new features with normalised label features.
'''
utt_number = len(new_feat_file_list)
if utt_number != len(binary_label_file_list):
print "the number of new feature input files and label files should be the same!\n";
sys.exit(1)
new_feat_ext = new_feat_file_list[0].split('/')[-1].split('.')[1]
io_funcs = BinaryIOCollection()
for i in xrange(utt_number):
lab_file_name = binary_label_file_list[i]
new_feat_file_name = new_feat_file_list[i]
out_feat_file_name = out_feat_file_list[i]
lab_features, lab_frame_number = io_funcs.load_binary_file_frame(lab_file_name, self.lab_dim)
new_features, feat_frame_number = io_funcs.load_binary_file_frame(new_feat_file_name, self.feat_dim)
            if (lab_frame_number - feat_frame_number) > 5:
                base_file_name = new_feat_file_list[i].split('/')[-1].split('.')[0]
                self.logger.critical("the number of frames in label and new features are different: %d vs %d (%s)" % (lab_frame_number, feat_frame_number, base_file_name))
                # a bare `raise` has no active exception to re-raise here, so
                # raise an explicit error instead
                raise ValueError("frame number mismatch: %d vs %d (%s)" % (
                    lab_frame_number, feat_frame_number, base_file_name))
merged_features = numpy.zeros((lab_frame_number, self.lab_dim+self.feat_dim))
merged_features[0:lab_frame_number, 0:self.lab_dim] = lab_features
merged_features[0:feat_frame_number, self.lab_dim:self.lab_dim+self.feat_dim] = new_features[0:lab_frame_number, ]
io_funcs.array_to_binary_file(merged_features, out_feat_file_name)
self.logger.debug('merged new feature %s of %d frames with %d label features' % (new_feat_ext, feat_frame_number,lab_frame_number) )
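# Illustrative usage (file lists and dimensions assumed; the real pipeline
# builds these lists elsewhere): append a 1-dim feature stream to 481-dim
# binary label files.
#   merger = MergeFeat(lab_dim=481, feat_dim=1)
#   merger.merge_data(['lab/a.lab'], ['f0/a.f0'], ['cmp/a.cmp'])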
| apache-2.0 |
vcshing/gae-pytz | setup.py | 11 | 1712 | """
gae-pytz
========
pytz has a severe performance problem that impedes its usage on Google App
Engine. The problem is that pytz.__init__ builds a list of available
zoneinfos by checking the entire zoneinfo database, which means it tries to
open hundreds of files. This is done in the module globals, so it is not
easily avoidable, and it is far from ideal on App Engine: application
initialization becomes unacceptably slow if every import of pytz checks
500+ files.
In this alternative version, pytz is highly optimized for App Engine,
following ideas from several existing recipes:
- database files are not automatically read when the module is imported
- the database files are loaded using zipimport to reduce the number of files
- it uses memcache to cache already-loaded zoneinfos
This results in almost unnoticeable load time and makes pytz usable on App
Engine.
"""
from setuptools import setup, find_packages
setup(
name='gaepytz',
version='2011h',
url='http://code.google.com/p/gae-pytz/',
license='MIT',
author='Rodrigo Moraes',
author_email='rodrigo.moraes@gmail.com',
description='A version of pytz that works well on Google App Engine.',
long_description=__doc__,
zip_safe=False,
platforms='any',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['pytz'],
include_package_data=True,
package_data={'': ['*.zip']},
)
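# Sketch of the optimization described in the module docstring, for
# illustration only -- this is not gae-pytz's actual implementation, and the
# archive path and memcache key prefix below are assumptions:
#
#   import zipfile
#   from google.appengine.api import memcache
#
#   _ZONEINFO_ZIP = 'pytz/zoneinfo.zip'  # assumed archive location
#
#   def _load_zoneinfo(zone):
#       # Return the raw tzfile bytes for one zone, memoized in memcache so
#       # the zip archive is only opened on a cache miss.
#       key = 'gaepytz.zoneinfo.' + zone
#       data = memcache.get(key)
#       if data is None:
#           archive = zipfile.ZipFile(_ZONEINFO_ZIP)
#           try:
#               data = archive.read('zoneinfo/' + zone)
#           finally:
#               archive.close()
#           memcache.set(key, data)
#       return data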
| mit |
Moriadry/tensorflow | tensorflow/compiler/tests/tensor_array_ops_test.py | 7 | 36344 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA TensorArray Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def _make_converter(dtype):
def _converter(x):
return np.asarray(x).astype(dtype.as_numpy_dtype)
return _converter
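# For illustration: _make_converter(dtypes.float32) returns a helper that
# casts nested Python lists to a float32 ndarray, e.g. convert([[1.0, 2.0]]).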
class TensorArrayTest(xla_test.XLATestCase):
def testTensorArrayWriteRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [[1.0, 3.0]])
w2 = w1.write(2, [[7.0, -8.5]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual([[4.0, 5.0]], d0)
self.assertAllEqual([[1.0, 3.0]], d1)
self.assertAllEqual([[7.0, -8.5]], d2)
def _testTensorArrayWritePack(self, tf_dtype):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0]]))
w1 = w0.write(1, convert([[6.0, 7.0]]))
w2 = w1.write(2, convert([[8.0, 9.0]]))
c0 = w2.stack()
self.assertAllEqual(
convert([[[4.0, 5.0]], [[6.0, 7.0]], [[8.0, 9.0]]]), c0.eval())
def testTensorArrayWritePack(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWritePack(dtype)
def testEmptyTensorArrayPack(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
empty_element = np.zeros((0, 1), dtype=np.float32)
w0 = ta.write(0, empty_element)
w1 = w0.write(1, empty_element)
w2 = w1.write(2, empty_element)
c0 = w2.stack()
self.assertAllEqual([3, 0, 1], c0.eval().shape)
def _testTensorArrayWriteConcat(self, tf_dtype):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
w0 = ta.write(0, convert([[4.0, 5.0], [104.0, 105.0]]))
w1 = w0.write(1, convert([[6.0, 7.0], [106.0, 107.0]]))
w2 = w1.write(2, convert([[8.0, 9.0], [204.0, 205.0]]))
c0 = w2.concat()
self.assertAllEqual(
convert([[4.0, 5.0], [104.0, 105.0], [6.0, 7.0],
[106.0, 107.0], [8.0, 9.0], [204.0, 205.0]]), c0.eval())
def testTensorArrayWriteConcat(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteConcat(dtype)
def _testTensorArrayUnpackRead(self, tf_dtype):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
# Unpack a vector into scalars
w0 = ta.unstack(convert([1.0, 2.0, 3.0]))
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert(1.0), d0)
self.assertAllEqual(convert(2.0), d1)
self.assertAllEqual(convert(3.0), d2)
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Unpack a matrix into vectors.
w1 = ta.unstack(convert([[1.0, 1.1], [2.0, 2.1], [3.0, 3.1]]))
r0 = w1.read(0)
r1 = w1.read(1)
r2 = w1.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0, 1.1]), d0)
self.assertAllEqual(convert([2.0, 2.1]), d1)
self.assertAllEqual(convert([3.0, 3.1]), d2)
# Reset ta because we're going to change the shape, else shape
# inference will throw an error.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
# Try unpacking an empty matrix, which should not cause an error.
w2 = ta.unstack(convert([[], [], []]))
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
def _testTensorArrayUnpackReadMaybeLegacy(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayUnpackRead(dtype)
def testTensorArrayUnpackRead(self):
self._testTensorArrayUnpackReadMaybeLegacy()
def _testTensorArraySplitRead(self, tf_dtype):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
convert = _make_converter(tf_dtype)
# Split an empty vector.
lengths = constant_op.constant([0, 0, 0])
w0 = ta.split(convert([]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([]), d0)
self.assertAllEqual(convert([]), d1)
self.assertAllEqual(convert([]), d2)
# Split a vector.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(convert([1.0, 2.0, 3.0]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([1.0]), d0)
self.assertAllEqual(convert([2.0]), d1)
self.assertAllEqual(convert([3.0]), d2)
# Split a matrix.
ta = tensor_array_ops.TensorArray(
dtype=tf_dtype, tensor_array_name="foo", size=3)
lengths = constant_op.constant([1, 1, 1])
w0 = ta.split(
convert([[1.0, 101.0], [2.0, 201.0], [3.0, 301.0]]), lengths=lengths)
r0 = w0.read(0)
r1 = w0.read(1)
r2 = w0.read(2)
d0, d1, d2 = session.run([r0, r1, r2])
self.assertAllEqual(convert([[1.0, 101.0]]), d0)
self.assertAllEqual(convert([[2.0, 201.0]]), d1)
self.assertAllEqual(convert([[3.0, 301.0]]), d2)
def testTensorArraySplitRead(self):
for dtype in self.numeric_tf_types:
self._testTensorArraySplitRead(dtype)
def testTensorGradArrayWriteRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad")
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2 = session.run([r0, r1, r2, g_r0, g_r1, g_r2])
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
def testTensorGradArrayDynamicWriteRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3)
w0 = ta.write(0, [[4.0]])
w1 = w0.write(1, [[1.0]])
w2 = w1.write(2, [[-3.0]])
g_ta = w2.grad("grad") # Get gradient array here so we know the shape
s = w2.size()
g_s = g_ta.size()
g_w0 = g_ta.write(0, [[5.0]])
g_w1 = g_w0.write(1, [[2.0]])
g_w2 = g_w1.write(2, [[-2.0]])
r0 = w2.read(0)
r1 = w2.read(1)
r2 = w2.read(2)
g_r0 = g_w2.read(0)
g_r1 = g_w2.read(1)
g_r2 = g_w2.read(2)
d0, d1, d2, g_d0, g_d1, g_d2, vs, g_vs = session.run(
[r0, r1, r2, g_r0, g_r1, g_r2, s, g_s])
self.assertAllEqual([[4.0]], d0)
self.assertAllEqual([[1.0]], d1)
self.assertAllEqual([[-3.0]], d2)
self.assertAllEqual([[5.0]], g_d0)
self.assertAllEqual([[2.0]], g_d1)
self.assertAllEqual([[-2.0]], g_d2)
self.assertAllEqual(3, vs)
self.assertAllEqual(3, g_vs)
def testTensorGradAccessTwiceReceiveSameObject(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3,
element_shape=[1, 2])
g_ta_0 = ta.grad("grad")
g_ta_1 = ta.grad("grad")
with ops.control_dependencies([g_ta_0.write(0, [[4.0, 5.0]]).flow]):
# Write with one gradient handle, read with another copy of it
r1_0 = g_ta_1.read(0)
t_g_ta_0, t_g_ta_1, d_r1_0 = session.run(
[g_ta_0.handle.op, g_ta_1.handle.op, r1_0])
self.assertAllEqual(t_g_ta_0, t_g_ta_1)
self.assertAllEqual([[4.0, 5.0]], d_r1_0)
def testTensorArrayWriteWrongIndexOrDataTypeFails(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
# Test writing the wrong datatype.
with self.assertRaisesOpError(
"TensorArray dtype is float but op has dtype int32"):
ta.write(-1, np.int32(7)).flow.eval()
def testTensorArrayReadWrongIndexOrDataTypeFails(self):
# Find two different floating point types, create an array of
# the first type, but try to read the other type.
if len(self.float_types) > 1:
dtype1 = self.float_types[0]
dtype2 = self.float_types[1]
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtype1, tensor_array_name="foo", size=3)
w0 = ta.write(0, [[4.0, 5.0]])
# Test reading wrong datatype.
r0_bad = gen_data_flow_ops._tensor_array_read_v3(
handle=w0.handle, index=0, dtype=dtype2, flow_in=w0.flow)
with self.assertRaisesOpError("TensorArray dtype is "):
r0_bad.eval()
# Test reading from a different index than the one we wrote to
w0.read(1)
def testTensorArraySplitIncompatibleShapesFails(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
with self.assertRaisesOpError(
r"value is not 1D"):
lengths = array_ops.placeholder(dtypes.int64)
ta.split([1.0, 2.0, 3.0], lengths).flow.eval(feed_dict={lengths: 1})
with self.assertRaisesOpError(
r"lengths must be equal: 1 vs. 2"):
ta.split([1.0, 2.0, 3.0], [1, 2, 3]).flow.eval()
with self.assertRaisesOpError(
r"value must have rank >= 1"):
ta.split(1.0, [1]).flow.eval()
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
infer_shape=False)
with self.assertRaisesOpError(
r"TensorArray's size is not equal to the size of lengths "
r"\(1 vs. 2\)"):
ta.split([1.0], [1]).flow.eval()
def _testTensorArrayWriteGradientAddMultipleAdds(self, dtype):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtype, tensor_array_name="foo", size=3, infer_shape=False)
c = lambda x: np.asarray(x, dtype=dtype.as_numpy_dtype)
w0 = ta.write(2, c(3.0))
w1 = w0.write(2, c(4.0))
ta_grad = w1.grad("grad")
w0_grad = ta_grad.write(2, c(3.0))
w1_grad = w0_grad.write(2, c(4.0))
w2_grad = w1_grad.write(2, c(5.0))
# Assert that aggregation works correctly
self.assertAllEqual(c(12.00), w2_grad.read(2).eval())
# Using differing shapes causes an exception
wb0_grad = ta_grad.write(1, c(1.0))
wb1_grad = wb0_grad.write(1, c([1.0]))
with self.assertRaisesOpError(
r"Mismatched TensorArray sizes"):
wb1_grad.flow.eval()
def testTensorArrayWriteGradientAddMultipleAdds(self):
for dtype in self.numeric_tf_types:
self._testTensorArrayWriteGradientAddMultipleAdds(dtype)
def testMultiTensorArray(self):
with self.test_session(), self.test_scope():
h1 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="foo")
w1 = h1.write(0, 4.0)
r1 = w1.read(0)
h2 = tensor_array_ops.TensorArray(
size=1, dtype=dtypes.float32, tensor_array_name="bar")
w2 = h2.write(0, 5.0)
r2 = w2.read(0)
r = r1 + r2
self.assertAllClose(9.0, r.eval())
def _testTensorArrayGradientWriteReadType(self, dtype):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.as_dtype(dtype),
tensor_array_name="foo",
size=3,
infer_shape=False)
c = lambda x: np.array(x, dtype=dtype)
value_0 = constant_op.constant(c([[4.0, 5.0]]))
value_1 = constant_op.constant(c([[3.0, 3.5]]))
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
r0 = w1.read(0)
r1 = w1.read(1)
r0_2 = w1.read(0)
# Test individual components' gradients
grad_just_r0 = gradients_impl.gradients(
ys=[r0], xs=[value_0], grad_ys=[c([[2.0, 3.0]])])
grad_just_r0_vals = session.run(grad_just_r0)
self.assertAllEqual(c([[2.0, 3.0]]), grad_just_r0_vals[0])
grad_r0_r0_2 = gradients_impl.gradients(
ys=[r0, r0_2],
xs=[value_0],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]])])
grad_r0_r0_2_vals = session.run(grad_r0_r0_2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_r0_r0_2_vals[0])
grad_just_r1 = gradients_impl.gradients(
ys=[r1], xs=[value_1], grad_ys=[c([[-2.0, -4.0]])])
grad_just_r1_vals = session.run(grad_just_r1)
self.assertAllEqual(c([[-2.0, -4.0]]), grad_just_r1_vals[0])
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r0, r0_2, r1],
xs=[value_0, value_1],
grad_ys=[c([[2.0, 3.0]]), c([[1.0, -1.0]]), c([[-2.0, -10.0]])])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 2)
self.assertAllEqual(c([[3.0, 2.0]]), grad_vals[0])
self.assertAllEqual(c([[-2.0, -10.0]]), grad_vals[1])
def testTensorArrayGradientWriteRead(self):
for dtype in self.numeric_types:
self._testTensorArrayGradientWriteReadType(dtype)
def _testTensorArrayGradientWritePackConcatAndRead(self):
with self.test_session() as sess, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value_0 = constant_op.constant([-1.0, 1.0])
value_1 = constant_op.constant([-10.0, 10.0])
w0 = ta.write(0, value_0)
w1 = w0.write(1, value_1)
p0 = w1.stack()
r0 = w1.read(0)
s0 = w1.concat()
# Test gradient accumulation between read(0), pack(), and concat().
with ops.control_dependencies([p0, r0, s0]):
grad_r = gradients_impl.gradients(
ys=[p0, r0, s0],
xs=[value_0, value_1],
grad_ys=[
[[2.0, 3.0], [4.0, 5.0]], # stack gradient
[-0.5, 1.5], # read(0) gradient
[20.0, 30.0, 40.0, 50.0], # concat gradient
])
      grad_vals = sess.run(grad_r)  # one gradient per x in xs: 2 entries
self.assertAllClose([2.0 - 0.5 + 20.0, 3.0 + 1.5 + 30.0], grad_vals[0])
self.assertAllEqual([4.0 + 40.0, 5.0 + 50.0], grad_vals[1])
def testTensorArrayGradientWritePackConcatAndRead(self):
self._testTensorArrayGradientWritePackConcatAndRead()
def testTensorArrayReadTwice(self):
with self.test_session(), self.test_scope():
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
ta_readtwice = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
w_readtwice = ta_readtwice.unstack(value)
r0_readtwice = w_readtwice.read(0)
with ops.control_dependencies([r0_readtwice]):
r1_readtwice = w_readtwice.read(0)
self.assertAllEqual([1.0, -1.0], r1_readtwice.eval())
def _testTensorArrayGradientUnpackRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=2,
clear_after_read=False)
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.unstack(value)
r0 = w.read(0)
r0_1 = w.read(0)
r1 = w.read(1)
# Test combined gradients + aggregation of read(0).
grad = gradients_impl.gradients(
ys=[r0, r0_1, r1],
xs=[value],
grad_ys=[[2.0, 3.0], [-1.5, 1.5], [4.0, 5.0]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0 - 1.5, 3.0 + 1.5], [4.0, 5.0]], grad_vals[0])
def testTensorArrayGradientUnpackRead(self):
self._testTensorArrayGradientUnpackRead()
def testTensorArrayGradientSplitConcat(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=2)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0], [1000.0, -1000.0]])
w = ta.split(value, [2, 2])
r = w.concat()
# Test combined gradients
grad = gradients_impl.gradients(
ys=[r],
xs=[value],
grad_ys=[[[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]]])
grad_vals = session.run(grad)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[2.0, -2.0], [20.0, -20.0], [200.0, -200.0],
[2000.0, -2000.0]],
grad_vals[0])
def testCloseTensorArray(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = ta.close()
session.run(c1)
def testSizeTensorArray(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
s = ta.size()
self.assertAllEqual(3, s.eval())
def testWriteCloseTensorArray(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=False)
w0 = ta.write(0, [[4.0, 5.0]])
w1 = w0.write(1, [3.0])
w1.close().run() # Expected to run without problems
# TODO(phawkins): implement while loops.
# def _testWhileLoopWritePackGradients(self, dynamic_size, dtype):
# np_dtype = dtype.as_numpy_dtype
# with self.test_session() as session, self.test_scope():
# v0 = array_ops.identity(np.arange(3 * 5, dtype=np_dtype).reshape(3, 5))
# var = variables.Variable(np.arange(100, 105, dtype=np_dtype))
# state0 = array_ops.identity(np.array([1] * 5, dtype=np_dtype))
# ta = tensor_array_ops.TensorArray(
# dtype=dtype,
# tensor_array_name="foo",
# size=0 if dynamic_size else 3,
# dynamic_size=dynamic_size)
# time_0 = array_ops.identity(0)
# def body(time, ta_t, state):
# sliced = array_ops.slice(
# v0, begin=array_ops.stack([time, 0]), size=[1, -1])
# sliced = array_ops.squeeze(sliced)
# out = sliced + var + state
# state += sliced
# ta_t = ta_t.write(time, out)
# return (time + 1, ta_t, state)
# (unused_0, h_final, unused_2) = control_flow_ops.while_loop(
# cond=lambda time, unused_1, unused_2: time < 3,
# body=body,
# loop_vars=(time_0, ta, state0),
# shape_invariants=(time_0.get_shape(), tensor_shape.unknown_shape(),
# tensor_shape.unknown_shape()),
# parallel_iterations=3)
# vout = h_final.stack()
# grad_val = -np.arange(3 * 5, dtype=np_dtype).reshape(3, 5)
# v0_grad = gradients_impl.gradients([vout], [v0], [grad_val])[0]
# state0_grad = gradients_impl.gradients([vout], [state0], [grad_val])[0]
# var_grad = gradients_impl.gradients([vout], [var], [grad_val])[0]
# variables.global_variables_initializer().run()
# state0_t, var_t, v0_t, vout_t, v0_grad_t, var_grad_t, state0_grad_t = (
# session.run([state0, var, v0, vout, v0_grad, var_grad, state0_grad])
# )
# just_v0_grad_t, = session.run([v0_grad])
# # state = [ state0 | state0 + v0[0] | state0 + v0[0] + v0[1] ]
# # vout = [ v0[0] + var + state[0] |
# # v0[1] + var + state[1] |
# # v0[2] + var + state[2] ]
# # = [ v0[0] + var + state0 |
# # v0[1] + var + state0 + v0[0] |
# # v0[2] + var + state0 + v0[0] + v0[1] ]
# #
# # d(vout[0])/d(v0) = [1 | 0 | 0 ]
# # d(vout[1])/d(v0) = [1 | 1 | 0 ]
# # d(vout[2])/d(v0) = [1 | 1 | 1 ]
# # d(vout)/d(var) = [1 | 1 | 1]
# # d(vout)/d(state0) = [ 1 | 1 | 1 ]
# state_per_time = np.array(
# [state0_t, state0_t + v0_t[0, :],
# state0_t + v0_t[0, :] + v0_t[1, :]])
# # Compare forward prop
# self.assertAllClose(v0_t + var_t + state_per_time, vout_t)
# # Compare backward prop
# expected_v0_grad_t = np.array([
# grad_val[0, :] + grad_val[1, :] + grad_val[2, :],
# grad_val[1, :] + grad_val[2, :], grad_val[2, :]
# ])
# self.assertAllEqual(expected_v0_grad_t, v0_grad_t)
# self.assertAllEqual(expected_v0_grad_t, just_v0_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), var_grad_t)
# self.assertAllClose(grad_val.sum(axis=0), state0_grad_t)
# def testWhileLoopWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=False, dtype=dtypes.float32)
# # TODO(ebrevdo): re-enable when While supports non-float32 gradients.
# # self._testWhileLoopWritePackGradients(
# # dynamic_size=False, dtype=tf.int64)
# def testWhileLoopDynamicWritePackGradients(self):
# self._testWhileLoopWritePackGradients(
# dynamic_size=True, dtype=dtypes.float32)
# def testGradSerialTwoLoops(self):
# with self.test_session(), self.test_scope():
# num_steps = 100
# acc = tensor_array_ops.TensorArray(
# dtype=dtypes.float32,
# size=num_steps,
# clear_after_read=False,
# element_shape=tensor_shape.scalar())
# i = constant_op.constant(0, name="i")
# x = constant_op.constant(2.0, name="x")
# c = lambda i, acc: i < 5
# def b(i, acc):
# x1 = control_flow_ops.cond(
# math_ops.equal(i, 0), lambda: x,
# lambda: math_ops.multiply(acc.read(i - 1), 2.0))
# return i + 1, acc.write(i, x1)
# i1, acc1 = control_flow_ops.while_loop(c, b, [i, acc])
# z = constant_op.constant(0.0)
# def fn(i, acc):
# return i + 1, acc.write(i, z)
# _, acc2 = control_flow_ops.while_loop(lambda i, acc: i < num_steps, fn,
# [i1, acc1])
# r = acc2.stack()
# grad = gradients_impl.gradients(r, [x])[0]
# self.assertAllClose(31.0, grad.eval())
def testSumOfTwoReadVariablesWithoutRepeatGrad(self):
with self.test_session() as session, self.test_scope():
a = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1)
b = array_ops.identity(
np.arange(
3 * 5, dtype=np.float32).reshape(3, 5) + 1 + 3 * 5)
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
ta = ta.write(0, a, name="write_a")
ta = ta.write(1, b, name="write_b")
c = (
ta.read(
0, name="read_a_0") + # a + b
ta.read(
1, name="read_b_0"))
g0 = -(np.arange(3 * 5, dtype=np.float32).reshape(3, 5) + 1)
grad_a = gradients_impl.gradients([c], [a], [g0])[0] # d(a+b)/da = 1
grad_b = gradients_impl.gradients([c], [b], [g0])[0] # d(a+b)/db = 1
# Test gradients calculated individually
grad_a_t, = session.run([grad_a])
self.assertAllEqual(grad_a_t, g0)
grad_b_t, = session.run([grad_b])
self.assertAllEqual(grad_b_t, g0)
# Test gradients calculated jointly.
joint_grad_a_t, joint_grad_b_t = session.run([grad_a, grad_b])
self.assertAllEqual(joint_grad_a_t, g0)
self.assertAllEqual(joint_grad_b_t, g0)
def testWriteShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c0 = constant_op.constant([4.0, 5.0])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c1 = constant_op.constant([6.0, 7.0])
w1 = w0.write(1, c1)
r0 = w1.read(0)
r1 = w1.read(1)
self.assertAllEqual(c0.get_shape(), r0.get_shape())
self.assertAllEqual(c1.get_shape(), r1.get_shape())
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=3)
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w0.write(0, c2)
def testPartlyUnknownShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, tensor_array_name="foo", size=6)
c0 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual([None, None, None, 3], r0.get_shape().as_list())
c1 = array_ops.placeholder(dtypes.float32, [None, None, None, 3])
w1 = w0.write(1, c1)
r1 = w1.read(0)
self.assertAllEqual([None, None, None, 3], r1.get_shape().as_list())
# Writing less specific shape (doesn't change type.)
c2 = array_ops.placeholder(dtypes.float32, [None, None, None, None])
w2 = w1.write(2, c2)
r2 = w2.read(0)
self.assertAllEqual([None, None, None, 3], r2.get_shape().as_list())
# Writing more specific shape in one dimension and less specific in
# another.
c3 = array_ops.placeholder(dtypes.float32, [None, None, 2, None])
w3 = w2.write(3, c3)
r3 = w3.read(0)
self.assertAllEqual([None, None, 2, 3], r3.get_shape().as_list())
# Writing partly defined shape using TensorArray.scatter.
c4 = array_ops.placeholder(dtypes.float32, [2, None, 4, 2, 3])
w4 = w3.scatter([4, 5], c4)
r4 = w4.read(0)
self.assertAllEqual([None, 4, 2, 3], r4.get_shape().as_list())
# Writing fully defined shape using TensorArray.split.
c5 = array_ops.placeholder(dtypes.float32, [10, 4, 2, 3])
w5 = w4.split(c5, constant_op.constant([5, 5]))
r5 = w5.read(0)
self.assertAllEqual([5, 4, 2, 3], r5.get_shape().as_list())
def _testUnpackShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
infer_shape=True)
value = constant_op.constant(
[[1.0, -1.0], [10.0, -10.0], [100.0, -100.0]])
w0 = ta.unstack(value)
r0 = w0.read(0)
self.assertAllEqual((2,), r0.get_shape())
c1 = constant_op.constant([4.0, 5.0])
w1 = w0.write(3, c1)
r1 = w1.read(0)
self.assertAllEqual(c1.get_shape(), r1.get_shape())
c2 = constant_op.constant([4.0, 5.0, 6.0])
with self.assertRaises(ValueError):
w1.write(4, c2)
def testUnpackShape(self):
self._testUnpackShape()
def testSplitShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=0,
infer_shape=True)
value = constant_op.constant([[1.0, -1.0], [2.0, -2.0], [3.0, -3.0]])
w0 = ta.split(value, [1, 1, 1])
r0 = w0.read(0)
self.assertAllEqual((1, 2), r0.get_shape())
ta1 = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo1",
size=0,
infer_shape=True)
w0 = ta1.split(value, [1, 2])
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def testWriteUnknownShape(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=3,
infer_shape=True)
c0 = array_ops.placeholder(dtypes.float32)
w0 = ta.write(0, c0)
r0 = w0.read(0)
self.assertAllEqual(r0.get_shape(), tensor_shape.unknown_shape())
def _testGradientWhenNotAllComponentsRead(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2)
x = constant_op.constant([2.0, 3.0])
w = ta.unstack(x)
r0 = w.read(0)
      # Calculate (dr0/dx0, dr0/dx1). Since r0 = x0, the gradients are (1, 0).
grad_r0 = gradients_impl.gradients(ys=[r0], xs=[x], grad_ys=[1.0])
grad_r0_vals = session.run(grad_r0)[0]
self.assertAllEqual(grad_r0_vals, [1.0, 0.0])
def testGradientWhenNotAllComponentsRead(self):
self._testGradientWhenNotAllComponentsRead()
def _testTensorArrayEvalEmpty(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=False)
with self.assertRaisesOpError(
"TensorArray has size zero, but element shape <unknown> is not fully "
"defined. Currently only static shapes are supported when packing "
"zero-size TensorArrays."):
ta.stack().eval()
def testTensorArrayEvalEmpty(self):
self._testTensorArrayEvalEmpty()
def _testTensorArrayEvalEmptyWithDefault(self):
with self.test_session(), self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=0, infer_shape=True)
self.assertEqual(0, ta.size().eval())
ta = ta.unstack(array_ops.zeros([0, 3, 5]))
packed = ta.stack()
self.assertAllEqual([0, 3, 5], packed.eval().shape)
# Concatenating zero tensors along their first dimension gives a
# first dimension of zero
self.assertAllEqual([0, 5], ta.concat().eval().shape)
def testTensorArrayEvalEmptyWithDefault(self):
self._testTensorArrayEvalEmptyWithDefault()
def testTensorArrayScatterReadAndGradients(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=10)
indices = constant_op.constant([1, 8])
value = constant_op.constant([[1.0, -1.0], [10.0, -10.0]])
w = ta.scatter(indices, value)
r0 = w.read(1)
r1 = w.read(8)
      # Test combined gradients for both reads, read(1) and read(8).
grad = gradients_impl.gradients(
ys=[r0, r1], xs=[value], grad_ys=[[2.0, 3.0], [4.0, 5.0]])
read_vals, grad_vals = session.run([[r0, r1], grad])
self.assertEqual(len(read_vals), 2)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([1.0, -1.0], read_vals[0])
self.assertAllEqual([10.0, -10.0], read_vals[1])
self.assertAllEqual([[2.0, 3.0], [4.0, 5.0]], grad_vals[0])
def testTensorArrayWriteGatherAndGradients(self):
with self.test_session() as session, self.test_scope():
ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32,
tensor_array_name="foo",
size=10)
values = constant_op.constant([[1.0 * x, -1.0 * x] for x in range(10)])
indices = constant_op.constant([1, 8])
w = ta.unstack(values)
g = w.gather(indices)
      # Test gradients flowing back through the gather at indices 1 and 8.
grad = gradients_impl.gradients(
ys=[g], xs=[values], grad_ys=[[[2.0, 3.0], [4.0, 5.0]]])
g_vals, grad_vals = session.run([[g], grad])
# Gradients for 8 of the 10 unread components are zero.
expected_grad = np.zeros((10, 2))
expected_grad[1] = [2.0, 3.0]
expected_grad[8] = [4.0, 5.0]
self.assertEqual(len(g_vals), 1)
self.assertEqual(len(grad_vals), 1)
self.assertAllEqual([[1.0, -1.0], [8.0, -8.0]], g_vals[0])
self.assertAllEqual(expected_grad, grad_vals[0])
def testTensorArrayIdentity(self):
with self.test_session() as session, self.test_scope():
ta0 = tensor_array_ops.TensorArray(dtype=dtypes.float32, size=2,
infer_shape=False)
ta1 = tensor_array_ops.TensorArray(dtype=dtypes.int32, size=4,
infer_shape=True)
ta0 = ta0.write(0, 0.)
ta1 = ta1.write(0, 1)
v0 = resource_variable_ops.ResourceVariable(0)
v1 = resource_variable_ops.ResourceVariable(0)
with ops.control_dependencies([v0.assign_add(1)]):
ta0 = ta0.identity()
with ops.control_dependencies([v1.assign_add(1)]):
ta1 = ta1.identity()
read0 = ta0.read(0)
read1 = ta1.read(0)
size0 = ta0.size()
size1 = ta1.size()
# Tests correct properties on new TensorArrays.
self.assertEqual(dtypes.float32, ta0.dtype)
self.assertEqual(dtypes.int32, ta1.dtype)
self.assertEqual(tensor_shape.unknown_shape(), read0.get_shape())
self.assertEqual(tensor_shape.scalar(), read1.get_shape())
variables.global_variables_initializer().run()
read0_v, read1_v, size0_v, size1_v = session.run(
(read0, read1, size0, size1))
      # Tests that the control dependencies were added and executed.
self.assertEqual(1, v0.eval())
self.assertEqual(1, v1.eval())
# Tests correct TensorArray.
self.assertEqual(read0_v, 0)
self.assertEqual(read1_v, 1)
self.assertEqual(size0_v, 2)
self.assertEqual(size1_v, 4)
if __name__ == "__main__":
test.main()
| apache-2.0 |
karlgluck/heroes-of-the-storm-replay-parser | s2protocol/protocol16755.py | 28 | 20109 | # Copyright (c) 2013 Blizzard Entertainment
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from decoders import *
# Decoding instructions for each protocol type.
typeinfos = [
('_int',[(0,7)]), #0
('_int',[(0,4)]), #1
('_int',[(0,6)]), #2
('_int',[(0,14)]), #3
('_int',[(0,22)]), #4
('_int',[(0,32)]), #5
('_choice',[(0,2),{0:('m_uint6',2),1:('m_uint14',3),2:('m_uint22',4),3:('m_uint32',5)}]), #6
('_int',[(0,5)]), #7
('_struct',[[('m_playerId',7,-1)]]), #8
('_blob',[(0,8)]), #9
('_int',[(0,8)]), #10
('_struct',[[('m_flags',10,0),('m_major',10,1),('m_minor',10,2),('m_revision',10,3),('m_build',5,4),('m_baseBuild',5,5)]]), #11
('_int',[(0,3)]), #12
('_struct',[[('m_signature',9,0),('m_version',11,1),('m_type',12,2),('m_elapsedGameLoops',5,3)]]), #13
('_fourcc',[]), #14
('_blob',[(0,7)]), #15
('_int',[(0,64)]), #16
('_struct',[[('m_region',10,0),('m_programId',14,1),('m_realm',5,2),('m_name',15,3),('m_id',16,4)]]), #17
('_struct',[[('m_a',10,0),('m_r',10,1),('m_g',10,2),('m_b',10,3)]]), #18
('_int',[(0,2)]), #19
('_struct',[[('m_name',9,0),('m_toon',17,1),('m_race',9,2),('m_color',18,3),('m_control',10,4),('m_teamId',1,5),('m_handicap',0,6),('m_observe',19,7),('m_result',19,8)]]), #20
('_array',[(0,5),20]), #21
('_optional',[21]), #22
('_blob',[(0,10)]), #23
('_blob',[(0,11)]), #24
('_struct',[[('m_file',24,0)]]), #25
('_bool',[]), #26
('_int',[(-9223372036854775808,64)]), #27
('_blob',[(0,12)]), #28
('_blob',[(40,0)]), #29
('_array',[(0,4),29]), #30
('_optional',[30]), #31
('_struct',[[('m_playerList',22,0),('m_title',23,1),('m_difficulty',9,2),('m_thumbnail',25,3),('m_isBlizzardMap',26,4),('m_timeUTC',27,5),('m_timeLocalOffset',27,6),('m_description',28,7),('m_imageFilePath',24,8),('m_mapFileName',24,9),('m_cacheHandles',31,10),('m_miniSave',26,11),('m_gameSpeed',12,12),('m_defaultDifficulty',2,13)]]), #32
('_optional',[10]), #33
('_struct',[[('m_race',33,-1)]]), #34
('_struct',[[('m_team',33,-1)]]), #35
('_struct',[[('m_name',9,-7),('m_randomSeed',5,-6),('m_racePreference',34,-5),('m_teamPreference',35,-4),('m_testMap',26,-3),('m_testAuto',26,-2),('m_observe',19,-1)]]), #36
('_array',[(0,5),36]), #37
('_struct',[[('m_lockTeams',26,-11),('m_teamsTogether',26,-10),('m_advancedSharedControl',26,-9),('m_randomRaces',26,-8),('m_battleNet',26,-7),('m_amm',26,-6),('m_ranked',26,-5),('m_noVictoryOrDefeat',26,-4),('m_fog',19,-3),('m_observers',19,-2),('m_userDifficulty',19,-1)]]), #38
('_int',[(1,4)]), #39
('_int',[(1,5)]), #40
('_int',[(1,8)]), #41
('_bitarray',[(0,6)]), #42
('_bitarray',[(0,8)]), #43
('_bitarray',[(0,2)]), #44
('_struct',[[('m_allowedColors',42,-5),('m_allowedRaces',43,-4),('m_allowedDifficulty',42,-3),('m_allowedControls',43,-2),('m_allowedObserveTypes',44,-1)]]), #45
('_array',[(0,5),45]), #46
('_struct',[[('m_randomValue',5,-23),('m_gameCacheName',23,-22),('m_gameOptions',38,-21),('m_gameSpeed',12,-20),('m_gameType',12,-19),('m_maxUsers',7,-18),('m_maxObservers',7,-17),('m_maxPlayers',7,-16),('m_maxTeams',39,-15),('m_maxColors',40,-14),('m_maxRaces',41,-13),('m_maxControls',41,-12),('m_mapSizeX',10,-11),('m_mapSizeY',10,-10),('m_mapFileSyncChecksum',5,-9),('m_mapFileName',24,-8),('m_mapAuthorName',9,-7),('m_modFileSyncChecksum',5,-6),('m_slotDescriptions',46,-5),('m_defaultDifficulty',2,-4),('m_cacheHandles',30,-3),('m_isBlizzardMap',26,-2),('m_isPremadeFFA',26,-1)]]), #47
('_optional',[1]), #48
('_optional',[7]), #49
('_struct',[[('m_color',49,-1)]]), #50
('_array',[(0,5),5]), #51
('_struct',[[('m_control',10,-9),('m_userId',48,-8),('m_teamId',1,-7),('m_colorPref',50,-6),('m_racePref',34,-5),('m_difficulty',2,-4),('m_handicap',0,-3),('m_observe',19,-2),('m_rewards',51,-1)]]), #52
('_array',[(0,5),52]), #53
('_struct',[[('m_phase',12,-9),('m_maxUsers',7,-8),('m_maxObservers',7,-7),('m_slots',53,-6),('m_randomSeed',5,-5),('m_hostUserId',48,-4),('m_isSinglePlayer',26,-3),('m_gameDuration',5,-2),('m_defaultDifficulty',2,-1)]]), #54
('_struct',[[('m_userInitialData',37,-3),('m_gameDescription',47,-2),('m_lobbyState',54,-1)]]), #55
('_struct',[[('m_syncLobbyState',55,-1)]]), #56
('_struct',[[('m_name',15,-1)]]), #57
('_blob',[(0,6)]), #58
('_struct',[[('m_name',58,-1)]]), #59
('_struct',[[('m_name',58,-3),('m_type',5,-2),('m_data',15,-1)]]), #60
('_struct',[[('m_type',5,-3),('m_name',58,-2),('m_data',28,-1)]]), #61
('_struct',[[('m_developmentCheatsEnabled',26,-4),('m_multiplayerCheatsEnabled',26,-3),('m_syncChecksummingEnabled',26,-2),('m_isMapToMapTransition',26,-1)]]), #62
('_struct',[[]]), #63
('_struct',[[('m_fileName',24,-5),('m_automatic',26,-4),('m_overwrite',26,-3),('m_name',9,-2),('m_description',23,-1)]]), #64
('_int',[(-2147483648,32)]), #65
('_struct',[[('x',65,-2),('y',65,-1)]]), #66
('_struct',[[('m_point',66,-4),('m_time',65,-3),('m_verb',23,-2),('m_arguments',23,-1)]]), #67
('_struct',[[('m_data',67,-1)]]), #68
('_int',[(0,17)]), #69
('_int',[(0,16)]), #70
('_struct',[[('m_abilLink',70,-3),('m_abilCmdIndex',7,-2),('m_abilCmdData',33,-1)]]), #71
('_optional',[71]), #72
('_null',[]), #73
('_int',[(0,20)]), #74
('_struct',[[('x',74,-3),('y',74,-2),('z',65,-1)]]), #75
('_struct',[[('m_targetUnitFlags',10,-6),('m_timer',10,-5),('m_tag',5,-4),('m_snapshotUnitLink',70,-3),('m_snapshotPlayerId',48,-2),('m_snapshotPoint',75,-1)]]), #76
('_choice',[(0,2),{0:('None',73),1:('TargetPoint',75),2:('TargetUnit',76),3:('Data',5)}]), #77
('_optional',[5]), #78
('_struct',[[('m_cmdFlags',69,-4),('m_abil',72,-3),('m_data',77,-2),('m_otherUnit',78,-1)]]), #79
('_array',[(0,8),10]), #80
('_choice',[(0,2),{0:('None',73),1:('Mask',43),2:('OneIndices',80),3:('ZeroIndices',80)}]), #81
('_struct',[[('m_unitLink',70,-3),('m_intraSubgroupPriority',10,-2),('m_count',10,-1)]]), #82
('_array',[(0,8),82]), #83
('_array',[(0,8),5]), #84
('_struct',[[('m_subgroupIndex',10,-4),('m_removeMask',81,-3),('m_addSubgroups',83,-2),('m_addUnitTags',84,-1)]]), #85
('_struct',[[('m_controlGroupId',1,-2),('m_delta',85,-1)]]), #86
('_struct',[[('m_controlGroupIndex',1,-3),('m_controlGroupUpdate',19,-2),('m_mask',81,-1)]]), #87
('_struct',[[('m_count',10,-6),('m_subgroupCount',10,-5),('m_activeSubgroupIndex',10,-4),('m_unitTagsChecksum',5,-3),('m_subgroupIndicesChecksum',5,-2),('m_subgroupsChecksum',5,-1)]]), #88
('_struct',[[('m_controlGroupId',1,-2),('m_selectionSyncData',88,-1)]]), #89
('_array',[(0,3),65]), #90
('_struct',[[('m_recipientId',1,-2),('m_resources',90,-1)]]), #91
('_struct',[[('m_chatMessage',23,-1)]]), #92
('_int',[(-128,8)]), #93
('_struct',[[('x',65,-3),('y',65,-2),('z',65,-1)]]), #94
('_struct',[[('m_beacon',93,-7),('m_ally',93,-6),('m_autocast',93,-5),('m_targetUnitTag',5,-4),('m_targetUnitSnapshotUnitLink',70,-3),('m_targetUnitSnapshotPlayerId',48,-2),('m_targetPoint',94,-1)]]), #95
('_struct',[[('m_speed',12,-1)]]), #96
('_struct',[[('m_delta',93,-1)]]), #97
('_struct',[[('m_verb',23,-2),('m_arguments',23,-1)]]), #98
('_struct',[[('m_alliance',5,-2),('m_control',5,-1)]]), #99
('_struct',[[('m_unitTag',5,-1)]]), #100
('_struct',[[('m_unitTag',5,-2),('m_flags',10,-1)]]), #101
('_struct',[[('m_conversationId',65,-2),('m_replyId',65,-1)]]), #102
('_struct',[[('m_purchaseItemId',65,-1)]]), #103
('_struct',[[('m_difficultyLevel',65,-1)]]), #104
('_choice',[(0,3),{0:('None',73),1:('Checked',26),2:('ValueChanged',5),3:('SelectionChanged',65),4:('TextChanged',24)}]), #105
('_struct',[[('m_controlId',65,-3),('m_eventType',65,-2),('m_eventData',105,-1)]]), #106
('_struct',[[('m_soundHash',5,-2),('m_length',5,-1)]]), #107
('_struct',[[('m_soundHash',84,-2),('m_length',84,-1)]]), #108
('_struct',[[('m_syncInfo',108,-1)]]), #109
('_struct',[[('m_sound',5,-1)]]), #110
('_struct',[[('m_transmissionId',65,-1)]]), #111
('_struct',[[('x',70,-2),('y',70,-1)]]), #112
('_optional',[70]), #113
('_struct',[[('m_target',112,-4),('m_distance',113,-3),('m_pitch',113,-2),('m_yaw',113,-1)]]), #114
('_int',[(0,1)]), #115
('_struct',[[('m_skipType',115,-1)]]), #116
('_struct',[[('m_button',5,-7),('m_down',26,-6),('m_posXUI',5,-5),('m_posYUI',5,-4),('m_posXWorld',65,-3),('m_posYWorld',65,-2),('m_posZWorld',65,-1)]]), #117
('_struct',[[('m_soundtrack',5,-1)]]), #118
('_struct',[[('m_planetId',65,-1)]]), #119
('_struct',[[('m_key',93,-2),('m_flags',93,-1)]]), #120
('_struct',[[('m_resources',90,-1)]]), #121
('_struct',[[('m_fulfillRequestId',65,-1)]]), #122
('_struct',[[('m_cancelRequestId',65,-1)]]), #123
('_struct',[[('m_researchItemId',65,-1)]]), #124
('_struct',[[('m_laggingPlayerId',1,-1)]]), #125
('_struct',[[('m_mercenaryId',65,-1)]]), #126
('_struct',[[('m_battleReportId',65,-2),('m_difficultyLevel',65,-1)]]), #127
('_struct',[[('m_battleReportId',65,-1)]]), #128
('_int',[(0,19)]), #129
('_struct',[[('m_decrementMs',129,-1)]]), #130
('_struct',[[('m_portraitId',65,-1)]]), #131
('_struct',[[('m_functionName',15,-1)]]), #132
('_struct',[[('m_result',65,-1)]]), #133
('_struct',[[('m_gameMenuItemIndex',65,-1)]]), #134
('_struct',[[('m_reason',93,-1)]]), #135
('_struct',[[('m_purchaseCategoryId',65,-1)]]), #136
('_struct',[[('m_button',70,-1)]]), #137
('_struct',[[('m_recipient',19,-2),('m_string',24,-1)]]), #138
('_struct',[[('m_recipient',19,-2),('m_point',66,-1)]]), #139
('_struct',[[('m_progress',65,-1)]]), #140
]
# Map from protocol NNet.Game.*Event eventid to (typeid, name)
game_event_types = {
5: (63, 'NNet.Game.SUserFinishedLoadingSyncEvent'),
7: (57, 'NNet.Game.SBankFileEvent'),
8: (59, 'NNet.Game.SBankSectionEvent'),
9: (60, 'NNet.Game.SBankKeyEvent'),
10: (61, 'NNet.Game.SBankValueEvent'),
11: (62, 'NNet.Game.SUserOptionsEvent'),
22: (64, 'NNet.Game.SSaveGameEvent'),
23: (63, 'NNet.Game.SSaveGameDoneEvent'),
25: (63, 'NNet.Game.SPlayerLeaveEvent'),
26: (68, 'NNet.Game.SGameCheatEvent'),
27: (79, 'NNet.Game.SCmdEvent'),
28: (86, 'NNet.Game.SSelectionDeltaEvent'),
29: (87, 'NNet.Game.SControlGroupUpdateEvent'),
30: (89, 'NNet.Game.SSelectionSyncCheckEvent'),
31: (91, 'NNet.Game.SResourceTradeEvent'),
32: (92, 'NNet.Game.STriggerChatMessageEvent'),
33: (95, 'NNet.Game.SAICommunicateEvent'),
34: (96, 'NNet.Game.SSetAbsoluteGameSpeedEvent'),
35: (97, 'NNet.Game.SAddAbsoluteGameSpeedEvent'),
37: (98, 'NNet.Game.SBroadcastCheatEvent'),
38: (99, 'NNet.Game.SAllianceEvent'),
39: (100, 'NNet.Game.SUnitClickEvent'),
40: (101, 'NNet.Game.SUnitHighlightEvent'),
41: (102, 'NNet.Game.STriggerReplySelectedEvent'),
44: (63, 'NNet.Game.STriggerSkippedEvent'),
45: (107, 'NNet.Game.STriggerSoundLengthQueryEvent'),
46: (110, 'NNet.Game.STriggerSoundOffsetEvent'),
47: (111, 'NNet.Game.STriggerTransmissionOffsetEvent'),
48: (111, 'NNet.Game.STriggerTransmissionCompleteEvent'),
49: (114, 'NNet.Game.SCameraUpdateEvent'),
50: (63, 'NNet.Game.STriggerAbortMissionEvent'),
51: (103, 'NNet.Game.STriggerPurchaseMadeEvent'),
52: (63, 'NNet.Game.STriggerPurchaseExitEvent'),
53: (104, 'NNet.Game.STriggerPlanetMissionLaunchedEvent'),
54: (63, 'NNet.Game.STriggerPlanetPanelCanceledEvent'),
55: (106, 'NNet.Game.STriggerDialogControlEvent'),
56: (109, 'NNet.Game.STriggerSoundLengthSyncEvent'),
57: (116, 'NNet.Game.STriggerConversationSkippedEvent'),
58: (117, 'NNet.Game.STriggerMouseClickedEvent'),
63: (63, 'NNet.Game.STriggerPlanetPanelReplayEvent'),
64: (118, 'NNet.Game.STriggerSoundtrackDoneEvent'),
65: (119, 'NNet.Game.STriggerPlanetMissionSelectedEvent'),
66: (120, 'NNet.Game.STriggerKeyPressedEvent'),
67: (132, 'NNet.Game.STriggerMovieFunctionEvent'),
68: (63, 'NNet.Game.STriggerPlanetPanelBirthCompleteEvent'),
69: (63, 'NNet.Game.STriggerPlanetPanelDeathCompleteEvent'),
70: (121, 'NNet.Game.SResourceRequestEvent'),
71: (122, 'NNet.Game.SResourceRequestFulfillEvent'),
72: (123, 'NNet.Game.SResourceRequestCancelEvent'),
73: (63, 'NNet.Game.STriggerResearchPanelExitEvent'),
74: (63, 'NNet.Game.STriggerResearchPanelPurchaseEvent'),
75: (124, 'NNet.Game.STriggerResearchPanelSelectionChangedEvent'),
76: (125, 'NNet.Game.SLagMessageEvent'),
77: (63, 'NNet.Game.STriggerMercenaryPanelExitEvent'),
78: (63, 'NNet.Game.STriggerMercenaryPanelPurchaseEvent'),
79: (126, 'NNet.Game.STriggerMercenaryPanelSelectionChangedEvent'),
80: (63, 'NNet.Game.STriggerVictoryPanelExitEvent'),
81: (63, 'NNet.Game.STriggerBattleReportPanelExitEvent'),
82: (127, 'NNet.Game.STriggerBattleReportPanelPlayMissionEvent'),
83: (128, 'NNet.Game.STriggerBattleReportPanelPlaySceneEvent'),
84: (128, 'NNet.Game.STriggerBattleReportPanelSelectionChangedEvent'),
85: (104, 'NNet.Game.STriggerVictoryPanelPlayMissionAgainEvent'),
86: (63, 'NNet.Game.STriggerMovieStartedEvent'),
87: (63, 'NNet.Game.STriggerMovieFinishedEvent'),
88: (130, 'NNet.Game.SDecrementGameTimeRemainingEvent'),
89: (131, 'NNet.Game.STriggerPortraitLoadedEvent'),
90: (133, 'NNet.Game.STriggerCustomDialogDismissedEvent'),
91: (134, 'NNet.Game.STriggerGameMenuItemSelectedEvent'),
92: (135, 'NNet.Game.STriggerCameraMoveEvent'),
93: (103, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseItemChangedEvent'),
94: (136, 'NNet.Game.STriggerPurchasePanelSelectedPurchaseCategoryChangedEvent'),
95: (137, 'NNet.Game.STriggerButtonPressedEvent'),
96: (63, 'NNet.Game.STriggerGameCreditsFinishedEvent'),
}
# The typeid of the NNet.Game.EEventId enum.
game_eventid_typeid = 0
# Map from protocol NNet.Game.*Message eventid to (typeid, name)
message_event_types = {
0: (138, 'NNet.Game.SChatMessage'),
1: (139, 'NNet.Game.SPingMessage'),
2: (140, 'NNet.Game.SLoadingProgressMessage'),
3: (63, 'NNet.Game.SServerPingMessage'),
}
# The typeid of the NNet.Game.EMessageId enum.
message_eventid_typeid = 1
# The typeid of NNet.SVarUint32 (the type used to encode gameloop deltas).
svaruint32_typeid = 6
# The typeid of NNet.Replay.SPlayerId (the type used to encode player ids).
replay_playerid_typeid = 8
# The typeid of NNet.Replay.SHeader (the type used to store replay game version and length).
replay_header_typeid = 13
# The typeid of NNet.Game.SDetails (the type used to store overall replay details).
game_details_typeid = 32
# The typeid of NNet.Replay.SInitData (the type used to store the initial lobby).
replay_initdata_typeid = 56
def _varuint32_value(value):
# Returns the numeric value from a SVarUint32 instance.
for k,v in value.iteritems():
return v
return 0
def _decode_event_stream(decoder, eventid_typeid, event_types, decode_player_id):
# Decodes events prefixed with a gameloop and possibly userid
gameloop = 0
while not decoder.done():
start_bits = decoder.used_bits()
# decode the gameloop delta before each event
delta = _varuint32_value(decoder.instance(svaruint32_typeid))
gameloop += delta
# decode the userid before each event
if decode_player_id:
playerid = decoder.instance(replay_playerid_typeid)
# decode the event id
eventid = decoder.instance(eventid_typeid)
typeid, typename = event_types.get(eventid, (None, None))
if typeid is None:
raise CorruptedError('eventid(%d) at %s' % (eventid, decoder))
# decode the event struct instance
event = decoder.instance(typeid)
event['_event'] = typename
event['_eventid'] = eventid
# insert gameloop and userid
event['_gameloop'] = gameloop
if decode_player_id:
event['_playerid'] = playerid
# the next event is byte aligned
decoder.byte_align()
# insert bits used in stream
event['_bits'] = decoder.used_bits() - start_bits
yield event
def decode_replay_game_events(contents):
"""Decodes and yields each game event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
game_eventid_typeid,
game_event_types,
decode_player_id=True):
yield event
def decode_replay_message_events(contents):
"""Decodes and yields each message event from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
for event in _decode_event_stream(decoder,
message_eventid_typeid,
message_event_types,
decode_player_id=True):
yield event
def decode_replay_header(contents):
"""Decodes and return the replay header from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(replay_header_typeid)
def decode_replay_details(contents):
"""Decodes and returns the game details from the contents byte string."""
decoder = VersionedDecoder(contents, typeinfos)
return decoder.instance(game_details_typeid)
def decode_replay_initdata(contents):
"""Decodes and return the replay init data from the contents byte string."""
decoder = BitPackedDecoder(contents, typeinfos)
return decoder.instance(replay_initdata_typeid)
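# A typical decoding flow, sketched for illustration: SC2 replays are MPQ
# archives, so an external reader such as mpyq is normally used to extract
# the raw byte strings that the decode_* functions above expect. mpyq is an
# assumed dependency; the field and file names follow common s2protocol usage.
#
#   import mpyq
#   archive = mpyq.MPQArchive('example.SC2Replay')
#   header = decode_replay_header(archive.header['user_data_header']['content'])
#   details = decode_replay_details(archive.read_file('replay.details'))
#   for event in decode_replay_game_events(archive.read_file('replay.game.events')):
#       print event['_event'], event['_gameloop']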
def decode_replay_attributes_events(contents):
"""Decodes and yields each attribute from the contents byte string."""
buffer = BitPackedBuffer(contents, 'little')
attributes = {}
if not buffer.done():
attributes['source'] = None
attributes['mapNamespace'] = buffer.read_bits(32)
count = buffer.read_bits(32)
attributes['scopes'] = {}
while not buffer.done():
value = {}
value['namespace'] = buffer.read_bits(32)
value['attrid'] = attrid = buffer.read_bits(32)
scope = buffer.read_bits(8)
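            # each value is 4 bytes stored little-endian; reverse the byte
            # order and strip NUL padding to recover the attribute string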
value['value'] = buffer.read_aligned_bytes(4)[::-1].strip('\x00')
            if scope not in attributes['scopes']:
                attributes['scopes'][scope] = {}
            if attrid not in attributes['scopes'][scope]:
                attributes['scopes'][scope][attrid] = []
attributes['scopes'][scope][attrid].append(value)
return attributes
def unit_tag(unitTagIndex, unitTagRecycle):
return (unitTagIndex << 18) + unitTagRecycle
def unit_tag_index(unitTag):
return (unitTag >> 18) & 0x00003fff
def unit_tag_recycle(unitTag):
return (unitTag) & 0x0003ffff
| mit |
sanguinariojoe/FreeCAD | src/Mod/Fem/feminout/convert2TetGen.py | 12 | 13032 | # ***************************************************************************
# * Copyright (c) 2010 Juergen Riegel <juergen.riegel@web.de> *
# * Copyright (c) 2018 Bernd Hahnebach <bernd@bimstatik.org> *
# * *
# * This file is part of the FreeCAD CAx development system. *
# * *
# * This program is free software; you can redistribute it and/or modify *
# * it under the terms of the GNU Lesser General Public License (LGPL) *
# * as published by the Free Software Foundation; either version 2 of *
# * the License, or (at your option) any later version. *
# * for detail see the LICENCE text file. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU Library General Public License for more details. *
# * *
# * You should have received a copy of the GNU Library General Public *
# * License along with this program; if not, write to the Free Software *
# * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
# * USA *
# * *
# ***************************************************************************
__title__ = "FreeCAD tetgen exporter"
__author__ = "Juergen Riegel"
__url__ = "https://www.freecadweb.org"
## \addtogroup FEM
# @{
# Make mesh of pn junction in TetGen format
import FreeCAD
from FreeCAD import Console
import Mesh
App = FreeCAD # shortcut
if FreeCAD.GuiUp:
import FreeCADGui
Gui = FreeCADGui # shortcut
def exportMeshToTetGenPoly(meshToExport, filePath, beVerbose=1):
"""Export mesh to TetGen *.poly file format"""
# ********** Part 1 - write node list to output file
if beVerbose == 1:
Console.PrintMessage("\nExport of mesh to TetGen file ...")
(allVertices, allFacets) = meshToExport.Topology
f = open(filePath, "w")
f.write("# This file was generated from FreeCAD geometry\n")
f.write("# Part 1 - node list\n")
f.write(
"TotalNumOfPoints: {}, NumOfDimensions; {}, "
"NumOfProperties: {}, BoundaryMarkerExists: {}\n"
.format(len(allVertices), 3, 0, 0)
)
for PointIndex in range(len(allVertices)):
f.write("%(PointIndex)5i %(x) e %(y) e %(z) e\n" % {
"PointIndex": PointIndex,
"x": allVertices[PointIndex].x,
"y": allVertices[PointIndex].y,
"z": allVertices[PointIndex].z
})
    # Find the BoundaryMarker for each facet. If an edge connects only two
    # facets, then those facets should have the same BoundaryMarker
BoundaryMarkerExists = 1
PointList = [allFacets[0][1], allFacets[0][0]]
PointList.sort()
EdgeFacets = {(PointList[0], PointList[1]): set([0])}
Edge = []
# Find all facets for each edge
for FacetIndex in range(len(allFacets)):
Facet = allFacets[FacetIndex]
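        # iterate i = 0, -1, -2, ... so that Facet[i], Facet[i + 1] walks all
        # edges of the polygon, including the closing edge Facet[-1], Facet[0]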
for i in range(0, -len(Facet), -1):
tmpEdge = [Facet[i], Facet[i + 1]]
tmpEdge.sort()
Edge.append(tmpEdge)
for i in range(len(Edge)):
EdgeIndex = (Edge[i][0], Edge[i][1])
if EdgeIndex in EdgeFacets:
EdgeFacets[EdgeIndex].add(FacetIndex)
else:
EdgeFacets[EdgeIndex] = set([FacetIndex])
Edge = []
# Find BoundaryMarker for each facet
BoundaryMarker = []
for index in range(len(allFacets)):
BoundaryMarker.append(0)
MinMarker = -1
InitialFacet = 0
BoundaryMarker[InitialFacet] = MinMarker
EdgeKeys = EdgeFacets.keys()
# disconnectedEdges = len(EdgeKeys)
if beVerbose == 1:
Console.PrintMessage(
"\nBoundaryMarker:" + repr(BoundaryMarker) + " " + repr(len(EdgeFacets))
)
searchForPair = 1
# Main loop: first search for all complementary facets
# then fill one branch and repeat while edges are available
while len(EdgeFacets) > 0:
removeEdge = 0
for EdgeIndex in EdgeKeys:
if len(EdgeFacets[EdgeIndex]) == 1:
removeEdge = 1
break
if len(EdgeFacets[EdgeIndex]) == 2:
FacetPair = []
for facet in EdgeFacets[EdgeIndex]:
FacetPair.append(facet)
if (BoundaryMarker[FacetPair[0]] == 0) and (BoundaryMarker[FacetPair[1]] == 0):
continue
if (BoundaryMarker[FacetPair[0]] != 0) and (BoundaryMarker[FacetPair[1]] != 0):
removeEdge = 1
break
if (BoundaryMarker[FacetPair[0]] != 0):
BoundaryMarker[FacetPair[1]] = BoundaryMarker[FacetPair[0]]
else:
BoundaryMarker[FacetPair[0]] = BoundaryMarker[FacetPair[1]]
removeEdge = 1
break
if searchForPair == 1:
continue
FacetTree = []
# AllMarkers = 1
MarkerSum = 0
for facet in EdgeFacets[EdgeIndex]:
FacetTree.append(facet)
MarkerSum += BoundaryMarker[facet]
if MarkerSum == 0:
continue
for facet in EdgeFacets[EdgeIndex]:
if BoundaryMarker[facet] == 0:
MinMarker -= 1
BoundaryMarker[facet] = MinMarker
searchForPair = 1
removeEdge = 1
break
if removeEdge == 1:
del EdgeFacets[EdgeIndex]
EdgeKeys = EdgeFacets.keys()
continue
searchForPair = 0
# End of main loop
if beVerbose == 1:
Console.PrintMessage(
"\nNew BoundaryMarker:" + repr(BoundaryMarker) + " " + repr(len(EdgeFacets))
)
# ********** Part 2 - write all facets to *.poly file
f.write("# Part 2 - facet list\n")
f.write("%(TotalNumOfFacets)i %(BoundaryMarkerExists)i\n" % {
"TotalNumOfFacets": len(allFacets),
"BoundaryMarkerExists": BoundaryMarkerExists
})
for FacetIndex in range(len(allFacets)):
f.write("# FacetIndex = %(Index)i\n" % {"Index": FacetIndex})
f.write("%(NumOfPolygons)3i " % {"NumOfPolygons": 1})
if BoundaryMarkerExists == 1:
f.write("0 %(BoundaryMarker)i" % {"BoundaryMarker": BoundaryMarker[FacetIndex]})
f.write("\n%(NumOfConers)3i " % {"NumOfConers": len(allFacets[FacetIndex])})
for PointIndex in range(len(allFacets[FacetIndex])):
# f.write(repr(allFacets[FacetIndex][PointIndex]))
f.write("%(PointIndex)i " % {"PointIndex": allFacets[FacetIndex][PointIndex]})
f.write("\n")
# ********** Part 3 and Part 4 are zero
f.write("# Part 3 - the hole list.\n# There is no hole in bar.\n0\n")
f.write("# Part 4 - the region list.\n# There is no region defined.\n0\n")
f.write("# This file was generated from FreeCAD geometry\n")
f.close()
def export(objectslist, filename):
"""Called when freecad exports a mesh to poly format"""
for obj in objectslist:
if isinstance(obj, Mesh.Feature):
exportMeshToTetGenPoly(obj.Mesh, filename, False)
break
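# FreeCAD reaches export() through its exporter registry; a module like this
# is typically registered from an Init.py along the lines of the call below
# (the module name here is a hypothetical example):
#
#   FreeCAD.addExportType("TetGen poly (*.poly)", "exportTetGenPoly")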
def createMesh():
# ======================== Script beginning... ========================
beVerbose = 1
if beVerbose == 1:
Console.PrintMessage("\n\n\n\n\n\n\n\nScript starts...")
# Geometry definition
# Define objects names
PyDocumentName = "pnJunction"
PSideBoxName = "PSide"
NSideBoxName = "NSide"
DepletionBoxName = "Depletion"
SurfDepletionBoxName = "SurfDepletion"
OxideBoxName = "Oxide"
AdsorbtionBoxName = "Adsorbtion"
pnMeshName = "pnMesh"
# Init objects
if beVerbose == 1:
Console.PrintMessage("\nInit Objects...")
# closeDocument after restart of macro. Needs any ActiveDocument.
# App.closeDocument(App.ActiveDocument.Label)
AppPyDoc = App.newDocument(PyDocumentName)
NSideBox = AppPyDoc.addObject("Part::Box", NSideBoxName)
PSideBox = AppPyDoc.addObject("Part::Box", PSideBoxName)
DepletionBox = AppPyDoc.addObject("Part::Box", DepletionBoxName)
SurfDepletionBox = AppPyDoc.addObject("Part::Box", SurfDepletionBoxName)
OxideBox = AppPyDoc.addObject("Part::Box", OxideBoxName)
AdsorbtionBox = AppPyDoc.addObject("Part::Box", AdsorbtionBoxName)
pnMesh = AppPyDoc.addObject("Mesh::Feature", pnMeshName)
BoxList = [
NSideBox,
DepletionBox,
PSideBox,
OxideBox,
AdsorbtionBox,
SurfDepletionBox
]
NSideBoxMesh = Mesh.Mesh()
PSideBoxMesh = Mesh.Mesh()
DepletionBoxMesh = Mesh.Mesh()
SurfDepletionBoxMesh = Mesh.Mesh()
OxideBoxMesh = Mesh.Mesh()
AdsorbtionBoxMesh = Mesh.Mesh()
BoxMeshList = [
NSideBoxMesh,
DepletionBoxMesh,
PSideBoxMesh,
OxideBoxMesh,
AdsorbtionBoxMesh,
SurfDepletionBoxMesh
]
if beVerbose == 1:
if len(BoxList) != len(BoxMeshList):
Console.PrintMessage(
"\n ERROR! Input len() of BoxList and BoxMeshList is not the same! "
)
# Set sizes in nanometers
if beVerbose == 1:
Console.PrintMessage("\nSet sizes...")
tessellationTollerance = 0.05
ModelWidth = 300
BulkHeight = 300
BulkLength = 300
DepletionSize = 50
OxideThickness = 5
AdsorbtionThickness = 10
# Big volumes of n and p material
NSideBox.Height = BulkHeight # Z-direction
NSideBox.Width = ModelWidth # Y-direction = const
NSideBox.Length = BulkLength # X-direction
PSideBox.Height = BulkHeight
PSideBox.Width = ModelWidth
PSideBox.Length = BulkLength
# Thin depletion layer between
DepletionBox.Height = BulkHeight
DepletionBox.Width = ModelWidth
DepletionBox.Length = DepletionSize * 2
    # Surface depletion layer
SurfDepletionBox.Height = DepletionSize
SurfDepletionBox.Width = ModelWidth
SurfDepletionBox.Length = BulkLength * 2 + DepletionSize * 2
# Oxide on the top
OxideBox.Height = OxideThickness
OxideBox.Width = ModelWidth
OxideBox.Length = BulkLength * 2 + DepletionSize * 2
    # Adsorption layer (object names keep the original "Adsorbtion" spelling)
AdsorbtionBox.Height = AdsorbtionThickness
AdsorbtionBox.Width = ModelWidth
AdsorbtionBox.Length = BulkLength * 2 + DepletionSize * 2
# Object placement
Rot = App.Rotation(0, 0, 0, 1)
NSideBox.Placement = App.Placement(
App.Vector(0, 0, -BulkHeight),
Rot
)
PSideBox.Placement = App.Placement(
App.Vector(DepletionSize * 2 + BulkLength, 0, -BulkHeight),
Rot
)
DepletionBox.Placement = App.Placement(
App.Vector(BulkLength, 0, -BulkHeight),
Rot
)
SurfDepletionBox.Placement = App.Placement(
App.Vector(0, 0, 0),
Rot
)
OxideBox.Placement = App.Placement(
App.Vector(0, 0, DepletionSize),
Rot
)
AdsorbtionBox.Placement = App.Placement(
App.Vector(0, 0, DepletionSize + OxideThickness),
Rot
)
# Unite
if beVerbose == 1:
Console.PrintMessage("\nFuse objects...")
fuseShape = BoxList[0].Shape
for index in range(1, len(BoxList), 1):
fuseShape = fuseShape.fuse(BoxList[index].Shape)
nmesh = Mesh.Mesh()
nmesh.addFacets(fuseShape.tessellate(tessellationTollerance))
# for index in range(len(BoxList)):
for index in range(len(BoxList) - 1): # Manual hack
BoxMeshList[index].addFacets(
BoxList[index].Shape.tessellate(tessellationTollerance)
)
nmesh.addMesh(BoxMeshList[index])
nmesh.removeDuplicatedPoints()
nmesh.removeDuplicatedFacets()
pnMesh.Mesh = nmesh
if FreeCAD.GuiUp:
# Hide all boxes
for box in BoxList:
Gui.hideObject(box)
# Remove all boxes
# for box in BoxList:
# App.ActiveDocument.removeObject(box.Name)
# Update document
AppPyDoc.recompute()
    # export to TetGen *.poly (use File|Export instead)
# filePath = "/home/tig/tmp/tetgen/pnJunction.poly"
# exportMeshToTetGenPoly(pnMesh.Mesh,filePath,beVerbose)
if FreeCAD.GuiUp:
pnMesh.ViewObject.Document.activeView().viewAxonometric()
pnMesh.ViewObject.Document.activeView().fitAll()
if beVerbose == 1:
Console.PrintMessage("\nScript finished without errors.")
## @}
| lgpl-2.1 |
knifenomad/django | tests/generic_views/test_edit.py | 51 | 18099 | from __future__ import unicode_literals
from django import forms
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.client import RequestFactory
from django.views.generic.base import View
from django.views.generic.edit import CreateView, FormMixin, ModelFormMixin
from . import views
from .models import Artist, Author
from .test_forms import AuthorForm
class FormMixinTests(SimpleTestCase):
def test_initial_data(self):
""" Test instance independence of initial data dict (see #16138) """
initial_1 = FormMixin().get_initial()
initial_1['foo'] = 'bar'
initial_2 = FormMixin().get_initial()
self.assertNotEqual(initial_1, initial_2)
def test_get_prefix(self):
""" Test prefix can be set (see #18872) """
test_string = 'test'
rf = RequestFactory()
get_request = rf.get('/')
class TestFormMixin(FormMixin):
request = get_request
default_kwargs = TestFormMixin().get_form_kwargs()
self.assertIsNone(default_kwargs.get('prefix'))
set_mixin = TestFormMixin()
set_mixin.prefix = test_string
set_kwargs = set_mixin.get_form_kwargs()
self.assertEqual(test_string, set_kwargs.get('prefix'))
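        # (The prefix ultimately namespaces the rendered input names, e.g. a
        # field "name" is rendered as "test-name" when prefix is "test".)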
def test_get_form(self):
class TestFormMixin(FormMixin):
request = RequestFactory().get('/')
self.assertIsInstance(
TestFormMixin().get_form(forms.Form), forms.Form,
'get_form() should use provided form class.'
)
class FormClassTestFormMixin(TestFormMixin):
form_class = forms.Form
self.assertIsInstance(
FormClassTestFormMixin().get_form(), forms.Form,
'get_form() should fallback to get_form_class() if none is provided.'
)
def test_get_context_data(self):
class FormContext(FormMixin):
request = RequestFactory().get('/')
form_class = forms.Form
self.assertIsInstance(FormContext().get_context_data()['form'], forms.Form)
@override_settings(ROOT_URLCONF='generic_views.urls')
class BasicFormTests(TestCase):
def test_post_data(self):
res = self.client.post('/contact/', {'name': "Me", 'message': "Hello"})
self.assertRedirects(res, '/list/authors/')
class ModelFormMixinTests(SimpleTestCase):
def test_get_form(self):
form_class = views.AuthorGetQuerySetFormView().get_form_class()
self.assertEqual(form_class._meta.model, Author)
def test_get_form_checks_for_object(self):
mixin = ModelFormMixin()
mixin.request = RequestFactory().get('/')
self.assertEqual({'initial': {}, 'prefix': None},
mixin.get_form_kwargs())
@override_settings(ROOT_URLCONF='generic_views.urls')
class CreateViewTests(TestCase):
def test_create(self):
res = self.client.get('/edit/authors/create/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
res = self.client.post('/edit/authors/create/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_invalid(self):
res = self.client.post('/edit/authors/create/',
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertEqual(Author.objects.count(), 0)
def test_create_with_object_url(self):
res = self.client.post('/edit/artists/create/',
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
artist = Artist.objects.get(name='Rene Magritte')
self.assertRedirects(res, '/detail/artist/%d/' % artist.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_create_with_redirect(self):
res = self.client.post('/edit/authors/create/redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_with_interpolated_redirect(self):
res = self.client.post(
'/edit/authors/create/interpolate_redirect/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'}
)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.first().pk
self.assertRedirects(res, '/edit/author/%d/update/' % pk)
# Also test with escaped chars in URL
res = self.client.post(
'/edit/authors/create/interpolate_redirect_nonascii/',
{'name': 'John Doe', 'slug': 'john-doe'}
)
self.assertEqual(res.status_code, 302)
pk = Author.objects.get(name='John Doe').pk
self.assertRedirects(res, '/%C3%A9dit/author/{}/update/'.format(pk))
def test_create_with_special_properties(self):
res = self.client.get('/edit/authors/create/special/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertNotIn('object', res.context)
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/authors/create/special/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
obj = Author.objects.get(slug='randall-munroe')
self.assertRedirects(res, reverse('author_detail', kwargs={'pk': obj.pk}))
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_create_without_redirect(self):
try:
self.client.post('/edit/authors/create/naive/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.fail('Should raise exception -- No redirect URL provided, and no get_absolute_url provided')
except ImproperlyConfigured:
pass
def test_create_restricted(self):
res = self.client.post('/edit/authors/create/restricted/',
{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/accounts/login/?next=/edit/authors/create/restricted/')
def test_create_view_with_restricted_fields(self):
class MyCreateView(CreateView):
model = Author
fields = ['name']
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name'])
def test_create_view_all_fields(self):
class MyCreateView(CreateView):
model = Author
fields = '__all__'
self.assertEqual(list(MyCreateView().get_form_class().base_fields),
['name', 'slug'])
def test_create_view_without_explicit_fields(self):
class MyCreateView(CreateView):
model = Author
message = (
"Using ModelFormMixin (base class of MyCreateView) without the "
"'fields' attribute is prohibited."
)
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
def test_define_both_fields_and_form_class(self):
class MyCreateView(CreateView):
model = Author
form_class = AuthorForm
fields = ['name']
message = "Specifying both 'fields' and 'form_class' is not permitted."
with self.assertRaisesMessage(ImproperlyConfigured, message):
MyCreateView().get_form_class()
@override_settings(ROOT_URLCONF='generic_views.urls')
class UpdateViewTests(TestCase):
def test_update_post(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
def test_update_invalid(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/' % a.pk,
{'name': 'A' * 101, 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 200)
self.assertTemplateUsed(res, 'generic_views/author_form.html')
self.assertEqual(len(res.context['form'].errors), 1)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe>'])
def test_update_with_object_url(self):
a = Artist.objects.create(name='Rene Magritte')
res = self.client.post('/edit/artists/%d/update/' % a.pk,
{'name': 'Rene Magritte'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/detail/artist/%d/' % a.pk)
self.assertQuerysetEqual(Artist.objects.all(), ['<Artist: Rene Magritte>'])
def test_update_with_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post('/edit/author/%d/update/redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_with_interpolated_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.post(
'/edit/author/%d/update/interpolate_redirect/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'}
)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
self.assertEqual(res.status_code, 302)
pk = Author.objects.first().pk
self.assertRedirects(res, '/edit/author/%d/update/' % pk)
# Also test with escaped chars in URL
res = self.client.post(
'/edit/author/%d/update/interpolate_redirect_nonascii/' % a.pk,
{'name': 'John Doe', 'slug': 'john-doe'}
)
self.assertEqual(res.status_code, 302)
pk = Author.objects.get(name='John Doe').pk
self.assertRedirects(res, '/%C3%A9dit/author/{}/update/'.format(pk))
def test_update_with_special_properties(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/%d/update/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], views.AuthorForm)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/form.html')
res = self.client.post('/edit/author/%d/update/special/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/detail/author/%d/' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (author of xkcd)>'])
def test_update_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/update/naive/' % a.pk,
{'name': 'Randall Munroe (author of xkcd)', 'slug': 'randall-munroe'})
def test_update_get_object(self):
a = Author.objects.create(
pk=1,
name='Randall Munroe',
slug='randall-munroe',
)
res = self.client.get('/edit/author/update/')
self.assertEqual(res.status_code, 200)
self.assertIsInstance(res.context['form'], forms.ModelForm)
self.assertIsInstance(res.context['view'], View)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_form.html')
# Modification with both POST and PUT (browser compatible)
res = self.client.post('/edit/author/update/',
{'name': 'Randall Munroe (xkcd)', 'slug': 'randall-munroe'})
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), ['<Author: Randall Munroe (xkcd)>'])
@override_settings(ROOT_URLCONF='generic_views.urls')
class DeleteViewTests(TestCase):
def test_delete_by_post(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['author'], Author.objects.get(pk=a.pk))
self.assertTemplateUsed(res, 'generic_views/author_confirm_delete.html')
# Deletion with POST
res = self.client.post('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_by_delete(self):
# Deletion with browser compatible DELETE method
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.delete('/edit/author/%d/delete/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_with_interpolated_redirect(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/%d/delete/interpolate_redirect/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/edit/authors/create/?deleted=%d' % a.pk)
self.assertQuerysetEqual(Author.objects.all(), [])
# Also test with escaped chars in URL
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.post('/edit/author/{}/delete/interpolate_redirect_nonascii/'.format(a.pk))
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/%C3%A9dit/authors/create/?deleted={}'.format(a.pk))
def test_delete_with_special_properties(self):
a = Author.objects.create(**{'name': 'Randall Munroe', 'slug': 'randall-munroe'})
res = self.client.get('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 200)
self.assertEqual(res.context['object'], Author.objects.get(pk=a.pk))
self.assertEqual(res.context['thingy'], Author.objects.get(pk=a.pk))
self.assertNotIn('author', res.context)
self.assertTemplateUsed(res, 'generic_views/confirm_delete.html')
res = self.client.post('/edit/author/%d/delete/special/' % a.pk)
self.assertEqual(res.status_code, 302)
self.assertRedirects(res, '/list/authors/')
self.assertQuerysetEqual(Author.objects.all(), [])
def test_delete_without_redirect(self):
a = Author.objects.create(
name='Randall Munroe',
slug='randall-munroe',
)
# Should raise exception -- No redirect URL provided, and no
# get_absolute_url provided
with self.assertRaises(ImproperlyConfigured):
self.client.post('/edit/author/%d/delete/naive/' % a.pk)
| bsd-3-clause |
saurabhjn76/sympy | sympy/functions/special/tests/test_hyper.py | 58 | 13415 | from sympy import (hyper, meijerg, S, Tuple, pi, I, exp, log,
cos, sqrt, symbols, oo, Derivative, gamma, O)
from sympy.series.limits import limit
from sympy.abc import x, z, k
from sympy.utilities.pytest import raises, slow
from sympy.utilities.randtest import (
random_complex_number as randcplx,
verify_numerically as tn,
test_derivative_numerically as td)
def test_TupleParametersBase():
# test that our implementation of the chain rule works
p = hyper((), (), z**2)
assert p.diff(z) == p*2*z
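    # i.e. d/dz 0F0(;; z**2) = 2*z * 0F0(;; z**2), which matches
    # d/dz exp(z**2) = 2*z*exp(z**2) since 0F0(;; t) == exp(t)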
def test_hyper():
raises(TypeError, lambda: hyper(1, 2, z))
assert hyper((1, 2), (1,), z) == hyper(Tuple(1, 2), Tuple(1), z)
h = hyper((1, 2), (3, 4, 5), z)
assert h.ap == Tuple(1, 2)
assert h.bq == Tuple(3, 4, 5)
assert h.argument == z
assert h.is_commutative is True
# just a few checks to make sure that all arguments go where they should
assert tn(hyper(Tuple(), Tuple(), z), exp(z), z)
assert tn(z*hyper((1, 1), Tuple(2), -z), log(1 + z), z)
# differentiation
h = hyper(
(randcplx(), randcplx(), randcplx()), (randcplx(), randcplx()), z)
assert td(h, z)
a1, a2, b1, b2, b3 = symbols('a1:3, b1:4')
assert hyper((a1, a2), (b1, b2, b3), z).diff(z) == \
a1*a2/(b1*b2*b3) * hyper((a1 + 1, a2 + 1), (b1 + 1, b2 + 1, b3 + 1), z)
# differentiation wrt parameters is not supported
assert hyper([z], [], z).diff(z) == Derivative(hyper([z], [], z), z)
# hyper is unbranched wrt parameters
from sympy import polar_lift
assert hyper([polar_lift(z)], [polar_lift(k)], polar_lift(x)) == \
hyper([z], [k], polar_lift(x))
def test_expand_func():
# evaluation at 1 of Gauss' hypergeometric function:
from sympy.abc import a, b, c
from sympy import gamma, expand_func
a1, b1, c1 = randcplx(), randcplx(), randcplx() + 5
assert expand_func(hyper([a, b], [c], 1)) == \
gamma(c)*gamma(-a - b + c)/(gamma(-a + c)*gamma(-b + c))
assert abs(expand_func(hyper([a1, b1], [c1], 1)).n()
- hyper([a1, b1], [c1], 1).n()) < 1e-10
# hyperexpand wrapper for hyper:
assert expand_func(hyper([], [], z)) == exp(z)
assert expand_func(hyper([1, 2, 3], [], z)) == hyper([1, 2, 3], [], z)
assert expand_func(meijerg([[1, 1], []], [[1], [0]], z)) == log(z + 1)
assert expand_func(meijerg([[1, 1], []], [[], []], z)) == \
meijerg([[1, 1], []], [[], []], z)
def replace_dummy(expr, sym):
from sympy import Dummy
dum = expr.atoms(Dummy)
if not dum:
return expr
assert len(dum) == 1
return expr.xreplace({dum.pop(): sym})
def test_hyper_rewrite_sum():
from sympy import RisingFactorial, factorial, Dummy, Sum
_k = Dummy("k")
assert replace_dummy(hyper((1, 2), (1, 3), x).rewrite(Sum), _k) == \
Sum(x**_k / factorial(_k) * RisingFactorial(2, _k) /
RisingFactorial(3, _k), (_k, 0, oo))
assert hyper((1, 2, 3), (-1, 3), z).rewrite(Sum) == \
hyper((1, 2, 3), (-1, 3), z)
def test_radius_of_convergence():
assert hyper((1, 2), [3], z).radius_of_convergence == 1
assert hyper((1, 2), [3, 4], z).radius_of_convergence == oo
assert hyper((1, 2, 3), [4], z).radius_of_convergence == 0
assert hyper((0, 1, 2), [4], z).radius_of_convergence == oo
assert hyper((-1, 1, 2), [-4], z).radius_of_convergence == 0
assert hyper((-1, -2, 2), [-1], z).radius_of_convergence == oo
assert hyper((-1, 2), [-1, -2], z).radius_of_convergence == 0
assert hyper([-1, 1, 3], [-2, 2], z).radius_of_convergence == 1
assert hyper([-1, 1], [-2, 2], z).radius_of_convergence == oo
assert hyper([-1, 1, 3], [-2], z).radius_of_convergence == 0
assert hyper((-1, 2, 3, 4), [], z).radius_of_convergence == oo
assert hyper([1, 1], [3], 1).convergence_statement == True
assert hyper([1, 1], [2], 1).convergence_statement == False
assert hyper([1, 1], [2], -1).convergence_statement == True
assert hyper([1, 1], [1], -1).convergence_statement == False
def test_meijer():
raises(TypeError, lambda: meijerg(1, z))
raises(TypeError, lambda: meijerg(((1,), (2,)), (3,), (4,), z))
assert meijerg(((1, 2), (3,)), ((4,), (5,)), z) == \
meijerg(Tuple(1, 2), Tuple(3), Tuple(4), Tuple(5), z)
g = meijerg((1, 2), (3, 4, 5), (6, 7, 8, 9), (10, 11, 12, 13, 14), z)
assert g.an == Tuple(1, 2)
assert g.ap == Tuple(1, 2, 3, 4, 5)
assert g.aother == Tuple(3, 4, 5)
assert g.bm == Tuple(6, 7, 8, 9)
assert g.bq == Tuple(6, 7, 8, 9, 10, 11, 12, 13, 14)
assert g.bother == Tuple(10, 11, 12, 13, 14)
assert g.argument == z
assert g.nu == 75
assert g.delta == -1
assert g.is_commutative is True
assert meijerg([1, 2], [3], [4], [5], z).delta == S(1)/2
# just a few checks to make sure that all arguments go where they should
assert tn(meijerg(Tuple(), Tuple(), Tuple(0), Tuple(), -z), exp(z), z)
assert tn(sqrt(pi)*meijerg(Tuple(), Tuple(),
Tuple(0), Tuple(S(1)/2), z**2/4), cos(z), z)
assert tn(meijerg(Tuple(1, 1), Tuple(), Tuple(1), Tuple(0), z),
log(1 + z), z)
# differentiation
g = meijerg((randcplx(),), (randcplx() + 2*I,), Tuple(),
(randcplx(), randcplx()), z)
assert td(g, z)
g = meijerg(Tuple(), (randcplx(),), Tuple(),
(randcplx(), randcplx()), z)
assert td(g, z)
g = meijerg(Tuple(), Tuple(), Tuple(randcplx()),
Tuple(randcplx(), randcplx()), z)
assert td(g, z)
a1, a2, b1, b2, c1, c2, d1, d2 = symbols('a1:3, b1:3, c1:3, d1:3')
assert meijerg((a1, a2), (b1, b2), (c1, c2), (d1, d2), z).diff(z) == \
(meijerg((a1 - 1, a2), (b1, b2), (c1, c2), (d1, d2), z)
+ (a1 - 1)*meijerg((a1, a2), (b1, b2), (c1, c2), (d1, d2), z))/z
assert meijerg([z, z], [], [], [], z).diff(z) == \
Derivative(meijerg([z, z], [], [], [], z), z)
# meijerg is unbranched wrt parameters
from sympy import polar_lift as pl
assert meijerg([pl(a1)], [pl(a2)], [pl(b1)], [pl(b2)], pl(z)) == \
meijerg([a1], [a2], [b1], [b2], pl(z))
# integrand
from sympy.abc import a, b, c, d, s
assert meijerg([a], [b], [c], [d], z).integrand(s) == \
z**s*gamma(c - s)*gamma(-a + s + 1)/(gamma(b - s)*gamma(-d + s + 1))
def test_meijerg_derivative():
assert meijerg([], [1, 1], [0, 0, x], [], z).diff(x) == \
log(z)*meijerg([], [1, 1], [0, 0, x], [], z) \
+ 2*meijerg([], [1, 1, 1], [0, 0, x, 0], [], z)
y = randcplx()
a = 5 # mpmath chokes with non-real numbers, and Mod1 with floats
assert td(meijerg([x], [], [], [], y), x)
assert td(meijerg([x**2], [], [], [], y), x)
assert td(meijerg([], [x], [], [], y), x)
assert td(meijerg([], [], [x], [], y), x)
assert td(meijerg([], [], [], [x], y), x)
assert td(meijerg([x], [a], [a + 1], [], y), x)
assert td(meijerg([x], [a + 1], [a], [], y), x)
assert td(meijerg([x, a], [], [], [a + 1], y), x)
assert td(meijerg([x, a + 1], [], [], [a], y), x)
b = S(3)/2
assert td(meijerg([a + 2], [b], [b - 3, x], [a], y), x)
def test_meijerg_period():
assert meijerg([], [1], [0], [], x).get_period() == 2*pi
assert meijerg([1], [], [], [0], x).get_period() == 2*pi
assert meijerg([], [], [0], [], x).get_period() == 2*pi # exp(x)
assert meijerg(
[], [], [0], [S(1)/2], x).get_period() == 2*pi # cos(sqrt(x))
assert meijerg(
[], [], [S(1)/2], [0], x).get_period() == 4*pi # sin(sqrt(x))
assert meijerg([1, 1], [], [1], [0], x).get_period() == oo # log(1 + x)
def test_hyper_unpolarify():
from sympy import exp_polar
a = exp_polar(2*pi*I)*x
b = x
assert hyper([], [], a).argument == b
assert hyper([0], [], a).argument == a
assert hyper([0], [0], a).argument == b
assert hyper([0, 1], [0], a).argument == a
@slow
def test_hyperrep():
from sympy.functions.special.hyper import (HyperRep, HyperRep_atanh,
HyperRep_power1, HyperRep_power2, HyperRep_log1, HyperRep_asin1,
HyperRep_asin2, HyperRep_sqrts1, HyperRep_sqrts2, HyperRep_log2,
HyperRep_cosasin, HyperRep_sinasin)
# First test the base class works.
from sympy import Piecewise, exp_polar
a, b, c, d, z = symbols('a b c d z')
class myrep(HyperRep):
@classmethod
def _expr_small(cls, x):
return a
@classmethod
def _expr_small_minus(cls, x):
return b
@classmethod
def _expr_big(cls, x, n):
return c*n
@classmethod
def _expr_big_minus(cls, x, n):
return d*n
assert myrep(z).rewrite('nonrep') == Piecewise((0, abs(z) > 1), (a, True))
assert myrep(exp_polar(I*pi)*z).rewrite('nonrep') == \
Piecewise((0, abs(z) > 1), (b, True))
assert myrep(exp_polar(2*I*pi)*z).rewrite('nonrep') == \
Piecewise((c, abs(z) > 1), (a, True))
assert myrep(exp_polar(3*I*pi)*z).rewrite('nonrep') == \
Piecewise((d, abs(z) > 1), (b, True))
assert myrep(exp_polar(4*I*pi)*z).rewrite('nonrep') == \
Piecewise((2*c, abs(z) > 1), (a, True))
assert myrep(exp_polar(5*I*pi)*z).rewrite('nonrep') == \
Piecewise((2*d, abs(z) > 1), (b, True))
assert myrep(z).rewrite('nonrepsmall') == a
assert myrep(exp_polar(I*pi)*z).rewrite('nonrepsmall') == b
def t(func, hyp, z):
""" Test that func is a valid representation of hyp. """
# First test that func agrees with hyp for small z
if not tn(func.rewrite('nonrepsmall'), hyp, z,
a=S(-1)/2, b=S(-1)/2, c=S(1)/2, d=S(1)/2):
return False
# Next check that the two small representations agree.
if not tn(
func.rewrite('nonrepsmall').subs(
z, exp_polar(I*pi)*z).replace(exp_polar, exp),
func.subs(z, exp_polar(I*pi)*z).rewrite('nonrepsmall'),
z, a=S(-1)/2, b=S(-1)/2, c=S(1)/2, d=S(1)/2):
return False
# Next check continuity along exp_polar(I*pi)*t
expr = func.subs(z, exp_polar(I*pi)*z).rewrite('nonrep')
if abs(expr.subs(z, 1 + 1e-15).n() - expr.subs(z, 1 - 1e-15).n()) > 1e-10:
return False
# Finally check continuity of the big reps.
def dosubs(func, a, b):
rv = func.subs(z, exp_polar(a)*z).rewrite('nonrep')
return rv.subs(z, exp_polar(b)*z).replace(exp_polar, exp)
for n in [0, 1, 2, 3, 4, -1, -2, -3, -4]:
expr1 = dosubs(func, 2*I*pi*n, I*pi/2)
expr2 = dosubs(func, 2*I*pi*n + I*pi, -I*pi/2)
if not tn(expr1, expr2, z):
return False
expr1 = dosubs(func, 2*I*pi*(n + 1), -I*pi/2)
expr2 = dosubs(func, 2*I*pi*n + I*pi, I*pi/2)
if not tn(expr1, expr2, z):
return False
return True
# Now test the various representatives.
a = S(1)/3
assert t(HyperRep_atanh(z), hyper([S(1)/2, 1], [S(3)/2], z), z)
assert t(HyperRep_power1(a, z), hyper([-a], [], z), z)
assert t(HyperRep_power2(a, z), hyper([a, a - S(1)/2], [2*a], z), z)
assert t(HyperRep_log1(z), -z*hyper([1, 1], [2], z), z)
assert t(HyperRep_asin1(z), hyper([S(1)/2, S(1)/2], [S(3)/2], z), z)
assert t(HyperRep_asin2(z), hyper([1, 1], [S(3)/2], z), z)
assert t(HyperRep_sqrts1(a, z), hyper([-a, S(1)/2 - a], [S(1)/2], z), z)
assert t(HyperRep_sqrts2(a, z),
-2*z/(2*a + 1)*hyper([-a - S(1)/2, -a], [S(1)/2], z).diff(z), z)
assert t(HyperRep_log2(z), -z/4*hyper([S(3)/2, 1, 1], [2, 2], z), z)
assert t(HyperRep_cosasin(a, z), hyper([-a, a], [S(1)/2], z), z)
assert t(HyperRep_sinasin(a, z), 2*a*z*hyper([1 - a, 1 + a], [S(3)/2], z), z)
@slow
def test_meijerg_eval():
from sympy import besseli, exp_polar
from sympy.abc import l
a = randcplx()
arg = x*exp_polar(k*pi*I)
expr1 = pi*meijerg([[], [(a + 1)/2]], [[a/2], [-a/2, (a + 1)/2]], arg**2/4)
expr2 = besseli(a, arg)
# Test that the two expressions agree for all arguments.
for x_ in [0.5, 1.5]:
for k_ in [0.0, 0.1, 0.3, 0.5, 0.8, 1, 5.751, 15.3]:
assert abs((expr1 - expr2).n(subs={x: x_, k: k_})) < 1e-10
assert abs((expr1 - expr2).n(subs={x: x_, k: -k_})) < 1e-10
# Test continuity independently
eps = 1e-13
expr2 = expr1.subs(k, l)
for x_ in [0.5, 1.5]:
for k_ in [0.5, S(1)/3, 0.25, 0.75, S(2)/3, 1.0, 1.5]:
assert abs((expr1 - expr2).n(
subs={x: x_, k: k_ + eps, l: k_ - eps})) < 1e-10
assert abs((expr1 - expr2).n(
subs={x: x_, k: -k_ + eps, l: -k_ - eps})) < 1e-10
expr = (meijerg(((0.5,), ()), ((0.5, 0, 0.5), ()), exp_polar(-I*pi)/4)
+ meijerg(((0.5,), ()), ((0.5, 0, 0.5), ()), exp_polar(I*pi)/4)) \
/(2*sqrt(pi))
assert (expr - pi/exp(1)).n(chop=True) == 0
def test_limits():
k, x = symbols('k, x')
assert hyper((1,), (S(4)/3, S(5)/3), k**2).series(k) == \
hyper((1,), (S(4)/3, S(5)/3), 0) + \
9*k**2*hyper((2,), (S(7)/3, S(8)/3), 0)/20 + \
81*k**4*hyper((3,), (S(10)/3, S(11)/3), 0)/1120 + \
O(k**6) # issue 6350
assert limit(meijerg((), (), (1,), (0,), -x), x, 0) == \
meijerg(((), ()), ((1,), (0,)), 0) # issue 6052
| bsd-3-clause |
ccellis/WHACK2016 | flask/lib/python2.7/site-packages/whoosh/fields.py | 2 | 56441 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""
Contains functions and classes related to fields.
"""
import datetime, fnmatch, re, struct, sys
from array import array
from decimal import Decimal
from whoosh import analysis, columns, formats
from whoosh.compat import with_metaclass
from whoosh.compat import itervalues, xrange
from whoosh.compat import bytes_type, string_type, text_type
from whoosh.system import emptybytes
from whoosh.system import pack_byte, unpack_byte
from whoosh.util.numeric import to_sortable, from_sortable
from whoosh.util.numeric import typecode_max, NaN
from whoosh.util.text import utf8encode, utf8decode
from whoosh.util.times import datetime_to_long, long_to_datetime
# Exceptions
class FieldConfigurationError(Exception):
pass
class UnknownFieldError(Exception):
pass
# Field Types
class FieldType(object):
"""
Represents a field configuration.
The FieldType object supports the following attributes:
* format (formats.Format): the storage format for posting blocks.
* analyzer (analysis.Analyzer): the analyzer to use to turn text into
terms.
* scorable (boolean): whether searches against this field may be scored.
This controls whether the index stores per-document field lengths for
this field.
* stored (boolean): whether the content of this field is stored for each
document. For example, in addition to indexing the title of a document,
you usually want to store the title so it can be presented as part of
the search results.
* unique (boolean): whether this field's value is unique to each document.
For example, 'path' or 'ID'. IndexWriter.update_document() will use
fields marked as 'unique' to find the previous version of a document
being updated.
* multitoken_query is a string indicating what kind of query to use when
a "word" in a user query parses into multiple tokens. The string is
interpreted by the query parser. The strings understood by the default
query parser are "first" (use first token only), "and" (join the tokens
with an AND query), "or" (join the tokens with OR), "phrase" (join
the tokens with a phrase query), and "default" (use the query parser's
default join type).
* vector (formats.Format or boolean): the format to use to store term
vectors. If not a ``Format`` object, any true value means to use the
index format as the term vector format. Any flase value means don't
store term vectors for this field.
The constructor for the base field type simply lets you supply your own
attribute values. Subclasses may configure some or all of this for you.
"""
analyzer = format = scorable = stored = unique = vector = None
indexed = True
multitoken_query = "default"
sortable_typecode = None
column_type = None
def __init__(self, format, analyzer, scorable=False,
stored=False, unique=False, multitoken_query="default",
sortable=False, vector=None):
self.format = format
self.analyzer = analyzer
self.scorable = scorable
self.stored = stored
self.unique = unique
self.multitoken_query = multitoken_query
self.set_sortable(sortable)
if isinstance(vector, formats.Format):
self.vector = vector
elif vector:
self.vector = self.format
else:
self.vector = None
def __repr__(self):
return ("%s(format=%r, scorable=%s, stored=%s, unique=%s)"
% (self.__class__.__name__, self.format, self.scorable,
self.stored, self.unique))
def __eq__(self, other):
return all((isinstance(other, FieldType),
(self.format == other.format),
(self.scorable == other.scorable),
(self.stored == other.stored),
(self.unique == other.unique),
(self.column_type == other.column_type)))
def __ne__(self, other):
return not(self.__eq__(other))
# Text
def index(self, value, **kwargs):
"""Returns an iterator of (btext, frequency, weight, encoded_value)
tuples for each unique word in the input value.
The default implementation uses the ``analyzer`` attribute to tokenize
the value into strings, then encodes them into bytes using UTF-8.
"""
if not self.format:
raise Exception("%s field %r cannot index without a format"
% (self.__class__.__name__, self))
if not isinstance(value, (text_type, list, tuple)):
raise ValueError("%r is not unicode or sequence" % value)
assert isinstance(self.format, formats.Format)
if "mode" not in kwargs:
kwargs["mode"] = "index"
word_values = self.format.word_values
ana = self.analyzer
for tstring, freq, wt, vbytes in word_values(value, ana, **kwargs):
yield (utf8encode(tstring)[0], freq, wt, vbytes)
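    # Sketch of the tuples index() yields for a TEXT-like field (order and
    # exact weights depend on the configured format and boost):
    #
    #   >>> list(TEXT().index(u"alfa bravo alfa"))  # doctest: +SKIP
    #   [(b'alfa', 2, 2.0, ...), (b'bravo', 1, 1.0, ...)]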
def tokenize(self, value, **kwargs):
"""
Analyzes the given string and returns an iterator of Token objects
(note: for performance reasons, actually the same token yielded over
and over with different attributes).
"""
if not self.analyzer:
raise Exception("%s field has no analyzer" % self.__class__)
return self.analyzer(value, **kwargs)
def process_text(self, qstring, mode='', **kwargs):
"""
Analyzes the given string and returns an iterator of token texts.
>>> field = fields.TEXT()
>>> list(field.process_text("The ides of March"))
["ides", "march"]
"""
if not self.format:
raise Exception("%s field has no format" % self)
return (t.text for t in self.tokenize(qstring, mode=mode, **kwargs))
# Conversion
def to_bytes(self, value):
"""
Returns a bytes representation of the given value, appropriate to be
written to disk. The default implementation assumes a unicode value and
encodes it using UTF-8.
"""
if isinstance(value, (list, tuple)):
value = value[0]
if not isinstance(value, bytes_type):
value = utf8encode(value)[0]
return value
def to_column_value(self, value):
"""
Returns an object suitable to be inserted into the document values
column for this field. The default implementation simply calls
``self.to_bytes(value)``.
"""
return self.to_bytes(value)
def from_bytes(self, bs):
return utf8decode(bs)[0]
def from_column_value(self, value):
return self.from_bytes(value)
# Columns/sorting
def set_sortable(self, sortable):
if sortable:
if isinstance(sortable, columns.Column):
self.column_type = sortable
else:
self.column_type = self.default_column()
else:
self.column_type = None
def sortable_terms(self, ixreader, fieldname):
"""
Returns an iterator of the "sortable" tokens in the given reader and
field. These values can be used for sorting. The default implementation
simply returns all tokens in the field.
This can be overridden by field types such as NUMERIC where some values
in a field are not useful for sorting.
"""
return ixreader.lexicon(fieldname)
def default_column(self):
return columns.VarBytesColumn()
# Parsing
def self_parsing(self):
"""
Subclasses should override this method to return True if they want
the query parser to call the field's ``parse_query()`` method instead
of running the analyzer on text in this field. This is useful where
the field needs full control over how queries are interpreted, such
as in the numeric field type.
"""
return False
def parse_query(self, fieldname, qstring, boost=1.0):
"""
When ``self_parsing()`` returns True, the query parser will call
this method to parse basic query text.
"""
raise NotImplementedError(self.__class__.__name__)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
"""
When ``self_parsing()`` returns True, the query parser will call
this method to parse range query text. If this method returns None
instead of a query object, the parser will fall back to parsing the
start and end terms using process_text().
"""
return None
# Spelling
def separate_spelling(self):
"""
Returns True if the field stores unstemmed words in a separate field for
spelling suggestions.
"""
return False
def spelling_fieldname(self, fieldname):
"""
Returns the name of a field to use for spelling suggestions instead of
this field.
:param fieldname: the name of this field.
"""
return fieldname
def spellable_words(self, value):
"""Returns an iterator of each unique word (in sorted order) in the
input value, suitable for inclusion in the field's word graph.
The default behavior is to call the field analyzer with the keyword
argument ``no_morph=True``, which should make the analyzer skip any
morphological transformation filters (e.g. stemming) to preserve the
original form of the words. Exotic field types may need to override
this behavior.
"""
if isinstance(value, (list, tuple)):
words = value
else:
words = [token.text for token
in self.analyzer(value, no_morph=True)]
return iter(sorted(set(words)))
# Utility
def subfields(self):
"""
Returns an iterator of ``(name_prefix, fieldobject)`` pairs for the
fields that need to be indexed when content is put in this field. The
default implementation simply yields ``("", self)``.
"""
yield "", self
def supports(self, name):
"""
Returns True if the underlying format supports the given posting
value type.
>>> field = TEXT()
>>> field.supports("positions")
True
>>> field.supports("chars")
False
"""
return self.format.supports(name)
def clean(self):
"""
Clears any cached information in the field and any child objects.
"""
if self.format and hasattr(self.format, "clean"):
self.format.clean()
# Events
def on_add(self, schema, fieldname):
pass
def on_remove(self, schema, fieldname):
pass
# Wrapper base class
class FieldWrapper(FieldType):
def __init__(self, subfield, prefix):
if isinstance(subfield, type):
subfield = subfield()
self.subfield = subfield
self.name_prefix = prefix
# By default we'll copy all the subfield's attributes -- override these
# in subclass constructor for things you want to change
self.analyzer = subfield.analyzer
self.format = subfield.format
self.column_type = subfield.column_type
self.scorable = subfield.scorable
self.stored = subfield.stored
self.unique = subfield.unique
self.indexed = subfield.indexed
self.vector = subfield.vector
def __eq__(self, other):
return self.subfield.__eq__(other)
def __ne__(self, other):
return self.subfield.__ne__(other)
# Text
# def index(self, value, boost=1.0, **kwargs):
# return self.subfield.index(value, boost, **kwargs)
#
# def tokenize(self, value, **kwargs):
# return self.subfield.tokenize(value, **kwargs)
#
# def process_text(self, qstring, mode='', **kwargs):
# return self.subfield.process_text(qstring, mode, **kwargs)
# Conversion
def to_bytes(self, value):
return self.subfield.to_bytes(value)
def to_column_value(self, value):
return self.subfield.to_column_value(value)
def from_bytes(self, bs):
return self.subfield.from_bytes(bs)
def from_column_value(self, value):
return self.subfield.from_column_value(value)
# Sorting/columns
def set_sortable(self, sortable):
self.subfield.set_sortable(sortable)
def sortable_terms(self, ixreader, fieldname):
return self.subfield.sortable_terms(ixreader, fieldname)
def default_column(self):
return self.subfield.default_column()
# Parsing
def self_parsing(self):
return self.subfield.self_parsing()
def parse_query(self, fieldname, qstring, boost=1.0):
return self.subfield.parse_query(fieldname, qstring, boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl, boost=1.0):
self.subfield.parse_range(fieldname, start, end, startexcl, endexcl,
boost)
# Utility
def subfields(self):
# The default FieldWrapper.subfields() implementation DOES NOT split
# out the subfield here -- you need to override if that's what you want
yield "", self
def supports(self, name):
return self.subfield.supports(name)
def clean(self):
self.subfield.clean()
# Events
def on_add(self, schema, fieldname):
self.subfield.on_add(schema, fieldname)
def on_remove(self, schema, fieldname):
self.subfield.on_remove(schema, fieldname)
# Pre-configured field types
class ID(FieldType):
"""
Configured field type that indexes the entire value of the field as one
token. This is useful for data you don't want to tokenize, such as the path
of a file.
"""
def __init__(self, stored=False, unique=False, field_boost=1.0,
sortable=False, analyzer=None):
"""
:param stored: Whether the value of this field is stored with the
document.
"""
self.analyzer = analyzer or analysis.IDAnalyzer()
# Don't store any information other than the doc ID
self.format = formats.Existence(field_boost=field_boost)
self.stored = stored
self.unique = unique
self.set_sortable(sortable)
class IDLIST(FieldType):
"""
Configured field type for fields containing IDs separated by whitespace
and/or punctuation (or anything else, using the expression param).
"""
def __init__(self, stored=False, unique=False, expression=None,
field_boost=1.0):
"""
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
:param expression: The regular expression object to use to extract
tokens. The default expression breaks tokens on CRs, LFs, tabs,
spaces, commas, and semicolons.
"""
expression = expression or re.compile(r"[^\r\n\t ,;]+")
self.analyzer = analysis.RegexAnalyzer(expression=expression)
# Don't store any information other than the doc ID
self.format = formats.Existence(field_boost=field_boost)
self.stored = stored
self.unique = unique
class NUMERIC(FieldType):
"""
Special field type that lets you index integer or floating point
numbers in relatively short fixed-width terms. The field converts numbers
to sortable bytes for you before indexing.
You specify the numeric type of the field (``int`` or ``float``) when you
create the ``NUMERIC`` object. The default is ``int``. For ``int``, you can
specify a size in bits (``32`` or ``64``). For both ``int`` and ``float``
you can specify a ``signed`` keyword argument (default is ``True``).
>>> schema = Schema(path=STORED, position=NUMERIC(int, 64, signed=False))
>>> ix = storage.create_index(schema)
>>> with ix.writer() as w:
... w.add_document(path="/a", position=5820402204)
...
You can also use the NUMERIC field to store Decimal instances by specifying
a type of ``int`` or ``long`` and the ``decimal_places`` keyword argument.
This simply multiplies each number by ``(10 ** decimal_places)`` before
    storing it as an integer. Of course this may throw away decimal precision
(by truncating, not rounding) and imposes the same maximum value limits as
``int``/``long``, but these may be acceptable for certain applications.
>>> from decimal import Decimal
>>> schema = Schema(path=STORED, position=NUMERIC(int, decimal_places=4))
>>> ix = storage.create_index(schema)
>>> with ix.writer() as w:
... w.add_document(path="/a", position=Decimal("123.45")
...
"""
def __init__(self, numtype=int, bits=32, stored=False, unique=False,
field_boost=1.0, decimal_places=0, shift_step=4, signed=True,
sortable=False, default=None):
"""
:param numtype: the type of numbers that can be stored in this field,
either ``int``, ``float``. If you use ``Decimal``,
use the ``decimal_places`` argument to control how many decimal
places the field will store.
:param bits: When ``numtype`` is ``int``, the number of bits to use to
store the number: 8, 16, 32, or 64.
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
:param decimal_places: specifies the number of decimal places to save
when storing Decimal instances. If you set this, you will always
get Decimal instances back from the field.
        :param shift_step: The number of bits of precision to shift away at
each tiered indexing level. Values should generally be 1-8. Lower
values yield faster searches but take up more space. A value
of `0` means no tiered indexing.
:param signed: Whether the numbers stored in this field may be
negative.
"""
# Allow users to specify strings instead of Python types in case
# docstring isn't clear
if numtype == "int":
numtype = int
if numtype == "float":
numtype = float
# Raise an error if the user tries to use a type other than int or
# float
if numtype is Decimal:
numtype = int
if not decimal_places:
raise TypeError("To store Decimal instances, you must set the "
"decimal_places argument")
elif numtype not in (int, float):
raise TypeError("Can't use %r as a type, use int or float"
% numtype)
# Sanity check
if numtype is float and decimal_places:
raise Exception("A float type and decimal_places argument %r are "
"incompatible" % decimal_places)
intsizes = [8, 16, 32, 64]
intcodes = ["B", "H", "I", "Q"]
# Set up field configuration based on type and size
if numtype is float:
bits = 64 # Floats are converted to 64 bit ints
else:
if bits not in intsizes:
raise Exception("Invalid bits %r, use 8, 16, 32, or 64"
% bits)
# Type code for the *sortable* representation
self.sortable_typecode = intcodes[intsizes.index(bits)]
self._struct = struct.Struct(">" + str(self.sortable_typecode))
self.numtype = numtype
self.bits = bits
self.stored = stored
self.unique = unique
self.decimal_places = decimal_places
self.shift_step = shift_step
self.signed = signed
self.analyzer = analysis.IDAnalyzer()
# Don't store any information other than the doc ID
self.format = formats.Existence(field_boost=field_boost)
self.min_value, self.max_value = self._min_max()
# Column configuration
if default is None:
if numtype is int:
default = typecode_max[self.sortable_typecode]
else:
default = NaN
elif not self.is_valid(default):
raise Exception("The default %r is not a valid number for this "
"field" % default)
self.default = default
self.set_sortable(sortable)
def __getstate__(self):
d = self.__dict__.copy()
if "_struct" in d:
del d["_struct"]
return d
def __setstate__(self, d):
self.__dict__.update(d)
self._struct = struct.Struct(">" + str(self.sortable_typecode))
if "min_value" not in d:
d["min_value"], d["max_value"] = self._min_max()
def _min_max(self):
numtype = self.numtype
bits = self.bits
signed = self.signed
# Calculate the minimum and maximum possible values for error checking
min_value = from_sortable(numtype, bits, signed, 0)
max_value = from_sortable(numtype, bits, signed, 2 ** bits - 1)
return min_value, max_value
def default_column(self):
return columns.NumericColumn(self.sortable_typecode,
default=self.default)
def is_valid(self, x):
try:
x = self.to_bytes(x)
except ValueError:
return False
except OverflowError:
return False
return True
def index(self, num, **kwargs):
# If the user gave us a list of numbers, recurse on the list
if isinstance(num, (list, tuple)):
for n in num:
for item in self.index(n):
yield item
return
# word, freq, weight, valuestring
if self.shift_step:
for shift in xrange(0, self.bits, self.shift_step):
yield (self.to_bytes(num, shift), 1, 1.0, emptybytes)
else:
yield (self.to_bytes(num), 1, 1.0, emptybytes)
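    # With the defaults (bits=32, shift_step=4) a single number is indexed as
    # eight progressively coarser terms (shifts 0, 4, ..., 28). Each term's
    # first byte records its shift (see sortable_to_bytes below), which lets
    # range queries match whole blocks of values via the truncated terms
    # instead of enumerating every individual value.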
def prepare_number(self, x):
if x == emptybytes or x is None:
return x
dc = self.decimal_places
if dc and isinstance(x, (string_type, Decimal)):
x = Decimal(x) * (10 ** dc)
elif isinstance(x, Decimal):
raise TypeError("Can't index a Decimal object unless you specified "
"decimal_places on the field")
try:
x = self.numtype(x)
except OverflowError:
raise ValueError("Value %r overflowed number type %r"
% (x, self.numtype))
if x < self.min_value or x > self.max_value:
raise ValueError("Numeric field value %s out of range [%s, %s]"
% (x, self.min_value, self.max_value))
return x
def unprepare_number(self, x):
dc = self.decimal_places
if dc:
s = str(x)
x = Decimal(s[:-dc] + "." + s[-dc:])
return x
def to_column_value(self, x):
if isinstance(x, (list, tuple, array)):
x = x[0]
x = self.prepare_number(x)
return to_sortable(self.numtype, self.bits, self.signed, x)
def from_column_value(self, x):
x = from_sortable(self.numtype, self.bits, self.signed, x)
return self.unprepare_number(x)
def to_bytes(self, x, shift=0):
# Try to avoid re-encoding; this sucks because on Python 2 we can't
# tell the difference between a string and encoded bytes, so we have
# to require the user use unicode when they mean string
if isinstance(x, bytes_type):
return x
if x == emptybytes or x is None:
return self.sortable_to_bytes(0)
x = self.prepare_number(x)
x = to_sortable(self.numtype, self.bits, self.signed, x)
return self.sortable_to_bytes(x, shift)
def sortable_to_bytes(self, x, shift=0):
if shift:
x >>= shift
return pack_byte(shift) + self._struct.pack(x)
def from_bytes(self, bs):
x = self._struct.unpack(bs[1:])[0]
x = from_sortable(self.numtype, self.bits, self.signed, x)
x = self.unprepare_number(x)
return x
def process_text(self, text, **kwargs):
return (self.to_bytes(text),)
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
from whoosh.qparser.common import QueryParserError
if qstring == "*":
return query.Every(fieldname, boost=boost)
if not self.is_valid(qstring):
raise QueryParserError("%r is not a valid number" % qstring)
token = self.to_bytes(qstring)
return query.Term(fieldname, token, boost=boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
from whoosh import query
from whoosh.qparser.common import QueryParserError
if start is not None:
if not self.is_valid(start):
raise QueryParserError("Range start %r is not a valid number"
% start)
start = self.prepare_number(start)
if end is not None:
if not self.is_valid(end):
raise QueryParserError("Range end %r is not a valid number"
% end)
end = self.prepare_number(end)
return query.NumericRange(fieldname, start, end, startexcl, endexcl,
boost=boost)
def sortable_terms(self, ixreader, fieldname):
zero = b"\x00"
for token in ixreader.lexicon(fieldname):
if token[0:1] != zero:
# Only yield the full-precision values
break
yield token
class DATETIME(NUMERIC):
"""
Special field type that lets you index datetime objects. The field
converts the datetime objects to sortable text for you before indexing.
Since this field is based on Python's datetime module it shares all the
limitations of that module, such as the inability to represent dates before
year 1 in the proleptic Gregorian calendar. However, since this field
stores datetimes as an integer number of microseconds, it could easily
represent a much wider range of dates if the Python datetime implementation
ever supports them.
>>> schema = Schema(path=STORED, date=DATETIME)
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", date=datetime.now())
>>> w.commit()
"""
def __init__(self, stored=False, unique=False, sortable=False):
"""
:param stored: Whether the value of this field is stored with the
document.
:param unique: Whether the value of this field is unique per-document.
"""
super(DATETIME, self).__init__(int, 64, stored=stored,
unique=unique, shift_step=8,
sortable=sortable)
def prepare_datetime(self, x):
from whoosh.util.times import floor
if isinstance(x, text_type):
# For indexing, support same strings as for query parsing --
# convert unicode to datetime object
x = self._parse_datestring(x)
x = floor(x) # this makes most sense (unspecified = lowest)
if isinstance(x, datetime.datetime):
return datetime_to_long(x)
elif isinstance(x, bytes_type):
return x
else:
raise Exception("%r is not a datetime" % (x,))
def to_column_value(self, x):
if isinstance(x, bytes_type):
raise Exception("%r is not a datetime" % (x,))
if isinstance(x, (list, tuple)):
x = x[0]
return self.prepare_datetime(x)
def from_column_value(self, x):
return long_to_datetime(x)
def to_bytes(self, x, shift=0):
x = self.prepare_datetime(x)
return NUMERIC.to_bytes(self, x, shift=shift)
def from_bytes(self, bs):
x = NUMERIC.from_bytes(self, bs)
return long_to_datetime(x)
def _parse_datestring(self, qstring):
# This method parses a very simple datetime representation of the form
# YYYY[MM[DD[hh[mm[ss[uuuuuu]]]]]]
from whoosh.util.times import adatetime, fix, is_void
qstring = qstring.replace(" ", "").replace("-", "").replace(".", "")
year = month = day = hour = minute = second = microsecond = None
if len(qstring) >= 4:
year = int(qstring[:4])
if len(qstring) >= 6:
month = int(qstring[4:6])
if len(qstring) >= 8:
day = int(qstring[6:8])
if len(qstring) >= 10:
hour = int(qstring[8:10])
if len(qstring) >= 12:
minute = int(qstring[10:12])
if len(qstring) >= 14:
second = int(qstring[12:14])
if len(qstring) == 20:
microsecond = int(qstring[14:])
at = fix(adatetime(year, month, day, hour, minute, second,
microsecond))
if is_void(at):
raise Exception("%r is not a parseable date" % qstring)
return at
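# Illustrative behaviour: "200608" parses to adatetime(year=2006,
# month=8), an ambiguous value spanning the whole month; parse_query()
# below expands such ambiguous values into a NumericRange.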
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
from whoosh.util.times import is_ambiguous
try:
at = self._parse_datestring(qstring)
except:
e = sys.exc_info()[1]
return query.error_query(e)
if is_ambiguous(at):
startnum = datetime_to_long(at.floor())
endnum = datetime_to_long(at.ceil())
return query.NumericRange(fieldname, startnum, endnum)
else:
return query.Term(fieldname, at, boost=boost)
def parse_range(self, fieldname, start, end, startexcl, endexcl,
boost=1.0):
from whoosh import query
if start is None and end is None:
return query.Every(fieldname, boost=boost)
if start is not None:
startdt = self._parse_datestring(start).floor()
start = datetime_to_long(startdt)
if end is not None:
enddt = self._parse_datestring(end).ceil()
end = datetime_to_long(enddt)
return query.NumericRange(fieldname, start, end, boost=boost)
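# Illustrative: parse_range("date", "2005", "2009", False, False) floors
# the start to 2005-01-01 00:00:00 and ceils the end to
# 2009-12-31 23:59:59.999999 before building the NumericRange.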
class BOOLEAN(FieldType):
"""
Special field type that lets you index boolean values (True and False).
The field converts the boolean values to text for you before indexing.
>>> schema = Schema(path=STORED, done=BOOLEAN)
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", done=False)
>>> w.commit()
"""
bytestrings = (b"f", b"t")
trues = frozenset(u"t true yes 1".split())
falses = frozenset(u"f false no 0".split())
def __init__(self, stored=False, field_boost=1.0):
"""
:param stored: Whether the value of this field is stored with the
document.
"""
self.stored = stored
# Don't store any information other than the doc ID
self.format = formats.Existence(field_boost=field_boost)
def _obj_to_bool(self, x):
# We special case strings such as "true", "false", "yes", "no", but
# otherwise call bool() on the query value. This lets you pass objects
# as query values and do the right thing.
if isinstance(x, string_type) and x.lower() in self.trues:
x = True
elif isinstance(x, string_type) and x.lower() in self.falses:
x = False
else:
x = bool(x)
return x
def to_bytes(self, x):
if isinstance(x, bytes_type):
return x
elif isinstance(x, string_type):
x = x.lower() in self.trues
else:
x = bool(x)
bs = self.bytestrings[int(x)]
return bs
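# Illustrative conversions given the rules above: to_bytes(True) -> b"t",
# to_bytes("no") -> b"f", to_bytes(0) -> b"f".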
def index(self, bit, **kwargs):
if isinstance(bit, string_type):
bit = bit.lower() in self.trues
else:
bit = bool(bit)
# word, freq, weight, valuestring
return [(self.bytestrings[int(bit)], 1, 1.0, emptybytes)]
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
if qstring == "*":
return query.Every(fieldname, boost=boost)
return query.Term(fieldname, self._obj_to_bool(qstring), boost=boost)
class STORED(FieldType):
"""
Configured field type for fields you want to store but not index.
"""
indexed = False
stored = True
def __init__(self):
pass
class COLUMN(FieldType):
"""
Configured field type for fields you want to store as a per-document
value column but not index.
"""
indexed = False
stored = False
def __init__(self, columnobj=None):
if columnobj is None:
columnobj = columns.VarBytesColumn()
if not isinstance(columnobj, columns.Column):
raise TypeError("%r is not a column object" % (columnobj,))
self.column_type = columnobj
def to_bytes(self, v):
return v
def from_bytes(self, b):
return b
class KEYWORD(FieldType):
"""
Configured field type for fields containing space-separated or
comma-separated keyword-like data (such as tags). The default is to not
store positional information (so phrase searching is not allowed in this
field) and to not make the field scorable.
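A minimal usage sketch, in the style of the other examples in this module
(the ``storage`` object is assumed to exist):
>>> schema = Schema(path=STORED, tags=KEYWORD(commas=True, stored=True))
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", tags=u"news, local")
>>> w.commit()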
"""
def __init__(self, stored=False, lowercase=False, commas=False,
scorable=False, unique=False, field_boost=1.0, sortable=False,
vector=None):
"""
:param stored: Whether to store the value of the field with the
document.
:param commas: Whether this is a comma-separated field. If this is False
(the default), it is treated as a space-separated field.
:param scorable: Whether this field is scorable.
"""
self.analyzer = analysis.KeywordAnalyzer(lowercase=lowercase,
commas=commas)
# Store field lengths and weights along with doc ID
self.format = formats.Frequency(field_boost=field_boost)
self.scorable = scorable
self.stored = stored
self.unique = unique
if isinstance(vector, formats.Format):
self.vector = vector
elif vector:
self.vector = self.format
else:
self.vector = None
if sortable:
self.column_type = self.default_column()
class TEXT(FieldType):
"""
Configured field type for text fields (for example, the body text of an
article). The default is to store positional information to allow phrase
searching. This field type is always scorable.
"""
def __init__(self, analyzer=None, phrase=True, chars=False, stored=False,
field_boost=1.0, multitoken_query="default", spelling=False,
sortable=False, lang=None, vector=None,
spelling_prefix="spell_"):
"""
:param analyzer: The analysis.Analyzer to use to index the field
contents. See the analysis module for more information. If you omit
this argument, the field uses analysis.StandardAnalyzer.
:param phrase: Whether to store positional information to allow phrase
searching.
:param chars: Whether to store character ranges along with positions.
If this is True, "phrase" is also implied.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param spelling: if True, and if the field's analyzer changes the form
of term text (such as a stemming analyzer), this field will store
extra information in a separate field (named using the
``spelling_prefix`` keyword argument) to allow spelling suggestions
to use the unchanged word forms as spelling suggestions.
:param sortable: If True, make this field sortable using the default
column type. If you pass a :class:`whoosh.columns.Column` instance
instead of True, the field will use the given column type.
:param lang: automatically configure a
:class:`whoosh.analysis.LanguageAnalyzer` for the given language.
This is ignored if you also specify an ``analyzer``.
:param vector: if this value evaluates to true, store a list of the
terms in this field in each document. If the value is an instance
of :class:`whoosh.formats.Format`, the index will use the object to
store the term vector. Any other true value (e.g. ``vector=True``)
will use the field's index format to store the term vector as well.
"""
if analyzer:
self.analyzer = analyzer
elif lang:
self.analyzer = analysis.LanguageAnalyzer(lang)
else:
self.analyzer = analysis.StandardAnalyzer()
if chars:
formatclass = formats.Characters
elif phrase:
formatclass = formats.Positions
else:
formatclass = formats.Frequency
self.format = formatclass(field_boost=field_boost)
if sortable:
if isinstance(sortable, columns.Column):
self.column_type = sortable
else:
self.column_type = columns.VarBytesColumn()
else:
self.column_type = None
self.spelling = spelling
self.spelling_prefix = spelling_prefix
self.multitoken_query = multitoken_query
self.scorable = True
self.stored = stored
if isinstance(vector, formats.Format):
self.vector = vector
elif vector:
self.vector = self.format
else:
self.vector = None
def subfields(self):
yield "", self
# If the user indicated this is a spellable field, and the analyzer
# is morphic, then also index into a spelling-only field that stores
# minimal information
if self.separate_spelling():
yield self.spelling_prefix, SpellField(self.analyzer)
def separate_spelling(self):
return self.spelling and self.analyzer.has_morph()
def spelling_fieldname(self, fieldname):
if self.separate_spelling():
return self.spelling_prefix + fieldname
else:
return fieldname
class SpellField(FieldType):
"""
This is a utility field type meant to be returned by ``TEXT.subfields()``
when it needs a minimal field to store the spellable words.
"""
def __init__(self, analyzer):
self.format = formats.Frequency()
self.analyzer = analyzer
self.column_type = None
self.scorable = False
self.stored = False
self.unique = False
self.indexed = True
self.spelling = False
# All the text analysis methods add "nomorph" to the keywords to get
# unmorphed term texts
def index(self, value, boost=1.0, **kwargs):
kwargs["nomorph"] = True
return FieldType.index(self, value, boost=boost, **kwargs)
def tokenize(self, value, **kwargs):
kwargs["nomorph"] = True
return FieldType.tokenize(self, value, **kwargs)
def process_text(self, qstring, mode='', **kwargs):
kwargs["nomorph"] = True
return FieldType.process_text(self, qstring, mode=mode, **kwargs)
class NGRAM(FieldType):
"""
Configured field that indexes text as N-grams. For example, with a field
type NGRAM(3,4), the value "hello" will be indexed as tokens
"hel", "hell", "ell", "ello", "llo". This field type chops the entire text
into N-grams, including whitespace and punctuation. See :class:`NGRAMWORDS`
for a field type that breaks the text into words first before chopping the
words into N-grams.
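A minimal usage sketch, in the style of the other examples in this module
(the ``storage`` object is assumed to exist):
>>> schema = Schema(path=STORED, content=NGRAM(minsize=3, maxsize=4))
>>> ix = storage.create_index(schema)
>>> w = ix.writer()
>>> w.add_document(path="/a", content=u"hello")
>>> w.commit()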
"""
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
queryor=False, phrase=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
:param phrase: store positions on the N-grams to allow exact phrase
searching. The default is off.
"""
formatclass = formats.Frequency
if phrase:
formatclass = formats.Positions
self.analyzer = analysis.NgramAnalyzer(minsize, maxsize)
self.format = formatclass(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
def self_parsing(self):
return True
def parse_query(self, fieldname, qstring, boost=1.0):
from whoosh import query
terms = [query.Term(fieldname, g)
for g in self.process_text(qstring, mode='query')]
cls = query.Or if self.queryor else query.And
return cls(terms, boost=boost)
class NGRAMWORDS(NGRAM):
"""
Configured field that chops text into words using a tokenizer,
lowercases the words, and then chops the words into N-grams.
"""
scorable = True
def __init__(self, minsize=2, maxsize=4, stored=False, field_boost=1.0,
tokenizer=None, at=None, queryor=False, sortable=False):
"""
:param minsize: The minimum length of the N-grams.
:param maxsize: The maximum length of the N-grams.
:param stored: Whether to store the value of this field with the
document. Since this field type generally contains a lot of text,
you should avoid storing it with the document unless you need to,
for example to allow fast excerpts in the search results.
:param tokenizer: an instance of :class:`whoosh.analysis.Tokenizer`
used to break the text into words.
:param at: if 'start', only takes N-grams from the start of the word.
If 'end', only takes N-grams from the end. Otherwise the default
is to take all N-grams from each word.
:param queryor: if True, combine the N-grams with an Or query. The
default is to combine N-grams with an And query.
"""
self.analyzer = analysis.NgramWordAnalyzer(minsize, maxsize, tokenizer,
at=at)
self.format = formats.Frequency(field_boost=field_boost)
self.stored = stored
self.queryor = queryor
self.set_sortable(sortable)
# Other fields
class ReverseField(FieldWrapper):
def __init__(self, subfield, prefix="rev_"):
FieldWrapper.__init__(self, subfield, prefix)
self.analyzer = subfield.analyzer | analysis.ReverseTextFilter()
self.format = BasicFormat(lengths=False, weights=False)
self.scorable = False
self.set_sortable(False)
self.stored = False
self.unique = False
self.vector = False
def subfields(self):
yield "", self.subfield
yield self.name_prefix, self
# Schema class
class MetaSchema(type):
def __new__(cls, name, bases, attrs):
super_new = super(MetaSchema, cls).__new__
if not any(b for b in bases if isinstance(b, MetaSchema)):
# If this isn't a subclass of MetaSchema, don't do anything special
return super_new(cls, name, bases, attrs)
# Create the class
special_attrs = {}
for key in list(attrs.keys()):
if key.startswith("__"):
special_attrs[key] = attrs.pop(key)
new_class = super_new(cls, name, bases, special_attrs)
fields = {}
for b in bases:
if hasattr(b, "_clsfields"):
fields.update(b._clsfields)
fields.update(attrs)
new_class._clsfields = fields
return new_class
def schema(self):
return Schema(**self._clsfields)
class Schema(object):
"""
Represents the collection of fields in an index. Maps field names to
FieldType objects which define the behavior of each field.
Low-level parts of the index use field numbers instead of field names for
compactness. This class has several methods for converting between the
field name, field number, and field object itself.
"""
def __init__(self, **fields):
"""
All keyword arguments to the constructor are treated as fieldname =
fieldtype pairs. The fieldtype can be an instantiated FieldType object,
or a FieldType sub-class (in which case the Schema will instantiate it
with the default constructor before adding it).
For example::
s = Schema(content = TEXT,
title = TEXT(stored = True),
tags = KEYWORD(stored = True))
"""
self._fields = {}
self._subfields = {}
self._dyn_fields = {}
for name in sorted(fields.keys()):
self.add(name, fields[name])
def copy(self):
"""
Returns a shallow copy of the schema. The field instances are not
deep copied, so they are shared between schema copies.
"""
return self.__class__(**self._fields)
def __eq__(self, other):
return (other.__class__ is self.__class__
and list(self.items()) == list(other.items()))
def __ne__(self, other):
return not(self.__eq__(other))
def __repr__(self):
return "<%s: %r>" % (self.__class__.__name__, self.names())
def __iter__(self):
"""
Returns the field objects in this schema.
"""
return iter(self._fields.values())
def __getitem__(self, name):
"""
Returns the field associated with the given field name.
"""
# If the name is in the dictionary, just return it
if name in self._fields:
return self._fields[name]
# Check if the name matches a dynamic field
for expr, fieldtype in itervalues(self._dyn_fields):
if expr.match(name):
return fieldtype
raise KeyError("No field named %r" % (name,))
def __len__(self):
"""
Returns the number of fields in this schema.
"""
return len(self._fields)
def __contains__(self, fieldname):
"""
Returns True if a field by the given name is in this schema.
"""
# Defined in terms of __getitem__ so that there's only one method to
# override to provide dynamic fields
try:
field = self[fieldname]
return field is not None
except KeyError:
return False
def __setstate__(self, state):
if "_subfields" not in state:
state["_subfields"] = {}
self.__dict__.update(state)
def to_bytes(self, fieldname, value):
return self[fieldname].to_bytes(value)
def items(self):
"""
Returns a list of ("fieldname", field_object) pairs for the fields
in this schema.
"""
return sorted(self._fields.items())
def names(self, check_names=None):
"""
Returns a list of the names of the fields in this schema.
:param check_names: (optional) sequence of field names to check
whether the schema accepts them as (dynamic) field names -
acceptable names will also be in the result list.
Note: You may also include static field names in check_names;
they won't create duplicates in the result list. Unsupported
names will not appear in the result list.
"""
fieldnames = set(self._fields.keys())
if check_names is not None:
check_names = set(check_names) - fieldnames
fieldnames.update(fieldname for fieldname in check_names
if fieldname in self)
return sorted(fieldnames)
def clean(self):
for field in self:
field.clean()
def add(self, name, fieldtype, glob=False):
"""
Adds a field to this schema.
:param name: The name of the field.
:param fieldtype: An instantiated fields.FieldType object, or a
FieldType subclass. If you pass an instantiated object, the schema
will use that as the field configuration for this field. If you
pass a FieldType subclass, the schema will automatically
instantiate it with the default constructor.
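:param glob: If True, the name is treated as a glob pattern (via
fnmatch) and the field becomes a dynamic field matching any field
name that fits the pattern, for example (illustrative)::
schema.add("*_d", DATETIME, glob=True)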
"""
# If the user passed a type rather than an instantiated field object,
# instantiate it automatically
if type(fieldtype) is type:
try:
fieldtype = fieldtype()
except:
e = sys.exc_info()[1]
raise FieldConfigurationError("Error: %s instantiating field "
"%r: %r" % (e, name, fieldtype))
if not isinstance(fieldtype, FieldType):
raise FieldConfigurationError("%r is not a FieldType object"
% fieldtype)
self._subfields[name] = sublist = []
for prefix, subfield in fieldtype.subfields():
fname = prefix + name
sublist.append(fname)
# Check field name
if fname.startswith("_"):
raise FieldConfigurationError("Names cannot start with _")
elif " " in fname:
raise FieldConfigurationError("Names cannot contain spaces")
elif fname in self._fields or (glob and fname in self._dyn_fields):
raise FieldConfigurationError("%r already in schema" % fname)
# Add the field
if glob:
expr = re.compile(fnmatch.translate(name))
self._dyn_fields[fname] = (expr, subfield)
else:
fieldtype.on_add(self, fname)
self._fields[fname] = subfield
def remove(self, fieldname):
if fieldname in self._fields:
self._fields[fieldname].on_remove(self, fieldname)
del self._fields[fieldname]
if fieldname in self._subfields:
for subname in self._subfields[fieldname]:
if subname in self._fields:
del self._fields[subname]
del self._subfields[fieldname]
elif fieldname in self._dyn_fields:
del self._dyn_fields[fieldname]
else:
raise KeyError("No field named %r" % fieldname)
def indexable_fields(self, fieldname):
if fieldname in self._subfields:
for subname in self._subfields[fieldname]:
yield subname, self._fields[subname]
else:
# Use __getitem__ here instead of getting it directly from _fields
# because it might be a glob
yield fieldname, self[fieldname]
def has_scorable_fields(self):
return any(ftype.scorable for ftype in self)
def stored_names(self):
"""
Returns a list of the names of fields that are stored.
"""
return [name for name, field in self.items() if field.stored]
def scorable_names(self):
"""
Returns a list of the names of fields that store field
lengths.
"""
return [name for name, field in self.items() if field.scorable]
class SchemaClass(with_metaclass(MetaSchema, Schema)):
"""
Allows you to define a schema using declarative syntax, similar to
Django models::
class MySchema(SchemaClass):
path = ID
date = DATETIME
content = TEXT
You can use inheritance to share common fields between schemas::
class Parent(SchemaClass):
path = ID(stored=True)
date = DATETIME
class Child1(Parent):
content = TEXT(positions=False)
class Child2(Parent):
tags = KEYWORD
This class overrides ``__new__`` so instantiating your sub-class always
results in an instance of ``Schema``.
>>> class MySchema(SchemaClass):
... title = TEXT(stored=True)
... content = TEXT
...
>>> s = MySchema()
>>> type(s)
<class 'whoosh.fields.Schema'>
"""
def __new__(cls, *args, **kwargs):
obj = super(Schema, cls).__new__(Schema)
kw = getattr(cls, "_clsfields", {})
kw.update(kwargs)
obj.__init__(*args, **kw)
return obj
def ensure_schema(schema):
if isinstance(schema, type) and issubclass(schema, Schema):
schema = schema.schema()
if not isinstance(schema, Schema):
raise FieldConfigurationError("%r is not a Schema" % schema)
return schema
def merge_fielddict(d1, d2):
keyset = set(d1.keys()) | set(d2.keys())
out = {}
for name in keyset:
field1 = d1.get(name)
field2 = d2.get(name)
if field1 and field2 and field1 != field2:
raise Exception("Inconsistent field %r: %r != %r"
% (name, field1, field2))
out[name] = field1 or field2
return out
def merge_schema(s1, s2):
schema = Schema()
schema._fields = merge_fielddict(s1._fields, s2._fields)
schema._dyn_fields = merge_fielddict(s1._dyn_fields, s2._dyn_fields)
return schema
def merge_schemas(schemas):
schema = schemas[0]
for i in xrange(1, len(schemas)):
schema = merge_schema(schema, schemas[i])
return schema
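# Illustrative use of the helpers above (index names are hypothetical):
#
#   merged = merge_schemas([ix1.schema, ix2.schema])
#
# merge_fielddict() raises an exception when the same field name maps to two
# different field types, so only consistent schemas can be merged.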
| bsd-3-clause |
sudosurootdev/external_chromium_org | chrome/common/extensions/docs/server2/file_system.py | 25 | 7914 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import posixpath
import traceback
from future import Future
from path_util import (
AssertIsDirectory, AssertIsValid, IsDirectory, IsValid, SplitParent,
ToDirectory)
def IsFileSystemThrottledError(error):
return type(error).__name__ == 'FileSystemThrottledError'
class _BaseFileSystemException(Exception):
def __init__(self, message):
Exception.__init__(self, message)
@classmethod
def RaiseInFuture(cls, message):
stack = traceback.format_stack()
def boom(): raise cls('%s. Creation stack:\n%s' % (message, ''.join(stack)))
return Future(callback=boom)
class FileNotFoundError(_BaseFileSystemException):
'''Raised when a file isn't found for read or stat.
'''
def __init__(self, filename):
_BaseFileSystemException.__init__(self, filename)
class FileSystemThrottledError(_BaseFileSystemException):
'''Raised when access to a file system resource is temporarily unavailable
due to service throttling.
'''
def __init__(self, filename):
_BaseFileSystemException.__init__(self, filename)
class FileSystemError(_BaseFileSystemException):
'''Raised when there are errors reading or statting files, such as a
network timeout.
'''
def __init__(self, filename):
_BaseFileSystemException.__init__(self, filename)
class StatInfo(object):
'''The result of calling Stat on a FileSystem.
'''
def __init__(self, version, child_versions=None):
if child_versions:
assert all(IsValid(path) for path in child_versions.iterkeys()), \
child_versions
self.version = version
self.child_versions = child_versions
def __eq__(self, other):
return (isinstance(other, StatInfo) and
self.version == other.version and
self.child_versions == other.child_versions)
def __ne__(self, other):
return not (self == other)
def __str__(self):
return '{version: %s, child_versions: %s}' % (self.version,
self.child_versions)
def __repr__(self):
return str(self)
class FileSystem(object):
'''A FileSystem interface that can read files and directories.
'''
def Read(self, paths, skip_not_found=False):
'''Reads each file in paths and returns a dictionary mapping the path to the
contents. If a path in paths ends with a '/', it is assumed to be a
directory, and a list of files in the directory is mapped to the path.
The contents will be a str.
If any path cannot be found:
- If |skip_not_found| is True, the resulting object will not contain any
mapping for that path.
- Otherwise, and by default, a FileNotFoundError is raised. This is
guaranteed to only happen once the Future has been resolved (Get()
called).
For any other failure, raises a FileSystemError.
'''
raise NotImplementedError(self.__class__)
def ReadSingle(self, path, skip_not_found=False):
'''Reads a single file from the FileSystem. Returns a Future with the same
rules as Read(). If |path| is not found raise a FileNotFoundError on Get(),
or if |skip_not_found| is True then return None.
'''
AssertIsValid(path)
read_single = self.Read([path], skip_not_found=skip_not_found)
return Future(callback=lambda: read_single.Get().get(path, None))
def Exists(self, path):
'''Returns a Future to the existence of |path|; True if |path| exists,
False if not. This method will not throw a FileNotFoundError unlike
the Read* methods, however it may still throw a FileSystemError.
There are several ways to implement this method via the interface but this
method exists to do so in a canonical and most efficient way for caching.
'''
AssertIsValid(path)
if path == '':
# There is always a root directory.
return Future(value=True)
parent, base = SplitParent(path)
def handle(error):
if isinstance(error, FileNotFoundError):
return False
raise error
return self.ReadSingle(ToDirectory(parent)).Then(lambda l: base in l,
handle)
def Refresh(self):
'''Asynchronously refreshes the content of the FileSystem, returning a
future to its completion.
'''
raise NotImplementedError(self.__class__)
# TODO(cduvall): Allow Stat to take a list of paths like Read.
def Stat(self, path):
'''DEPRECATED: Please try to use StatAsync instead.
Returns a |StatInfo| object containing the version of |path|. If |path|
is a directory, |StatInfo| will have the versions of all the children of
the directory in |StatInfo.child_versions|.
If the path cannot be found, raises a FileNotFoundError.
For any other failure, raises a FileSystemError.
'''
# Delegate to this implementation's StatAsync if it has been implemented.
if type(self).StatAsync != FileSystem.StatAsync:
return self.StatAsync(path).Get()
raise NotImplementedError(self.__class__)
def StatAsync(self, path):
'''An async version of Stat. Returns a Future to a StatInfo rather than a
raw StatInfo.
This is a bandaid for a lack of an async Stat function. Stat() should be
async by default but for now just let implementations override this if they
like.
'''
return Future(callback=lambda: self.Stat(path))
def GetIdentity(self):
'''The identity of the file system, exposed for caching classes to
namespace their caches. This will usually depend on the configuration of
that file system - e.g. a LocalFileSystem with a base path of /var is
different from a SubversionFileSystem with a base path of /bar, which is
in turn different from a LocalFileSystem with a base path of /usr.
'''
raise NotImplementedError(self.__class__)
def Walk(self, root, depth=-1, file_lister=None):
'''Recursively walk the directories in a file system, starting with root.
Behaviour is very similar to os.walk from the standard os module, yielding
(base, dirs, files) recursively, where |base| is the base path of |files|,
|dirs| relative to |root|, and |files| and |dirs| the list of files/dirs in
|base| respectively. If |depth| is specified and greater than 0, Walk will
only recurse |depth| times.
|file_lister|, if specified, should be a callback of signature
def my_file_lister(root):,
which returns a tuple (dirs, files), where |dirs| is a list of directory
names under |root|, and |files| is a list of file names under |root|. Note
that the listing of files and directories should be for a *single* level
only, i.e. it should not recursively list anything.
Note that directories will always end with a '/', files never will.
If |root| cannot be found, raises a FileNotFoundError.
For any other failure, raises a FileSystemError.
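An illustrative |file_lister| (names are hypothetical):
  def my_file_lister(root):
    return (['subdir/'], ['a.txt', 'b.txt'])
Note the trailing '/' on directory names, as required above.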
'''
AssertIsDirectory(root)
basepath = root
def walk(root, depth):
if depth == 0:
return
AssertIsDirectory(root)
if file_lister:
dirs, files = file_lister(root)
else:
dirs, files = [], []
for f in self.ReadSingle(root).Get():
if IsDirectory(f):
dirs.append(f)
else:
files.append(f)
yield root[len(basepath):].rstrip('/'), dirs, files
for d in dirs:
for walkinfo in walk(root + d, depth - 1):
yield walkinfo
for walkinfo in walk(root, depth):
yield walkinfo
def __eq__(self, other):
return (isinstance(other, FileSystem) and
self.GetIdentity() == other.GetIdentity())
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return '<%s>' % type(self).__name__
def __str__(self):
return repr(self)
| bsd-3-clause |
Endika/c2c-rd-addons | hr_attendance_sheet_check/__openerp__.py | 4 | 1635 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{ 'sequence': 500,
'name' : 'check sign in-out on sheet level'
, 'version' : '0.7'
, 'category' : 'HR'
, 'description' : """
This module checks the sign in/out sequence when saving the sheet,
thus allowing past sign_in_out dates/times to be entered
"""
, 'author' : 'ChriCar Beteiligungs- und Beratungs- GmbH'
, 'depends' : ['hr_attendance','hr_timesheet_sheet' ]
, 'data' : ['hr_attendance_checkbox.xml']
, 'demo_xml' : []
, 'installable': False
, 'active' : False
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yask123/django | tests/template_tests/filter_tests/test_linebreaksbr.py | 331 | 1742 | from django.template.defaultfilters import linebreaksbr
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class LinebreaksbrTests(SimpleTestCase):
"""
The contents in "linebreaksbr" are escaped according to the current
autoescape setting.
"""
@setup({'linebreaksbr01': '{{ a|linebreaksbr }} {{ b|linebreaksbr }}'})
def test_linebreaksbr01(self):
output = self.engine.render_to_string('linebreaksbr01', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "x&<br />y x&<br />y")
@setup({'linebreaksbr02':
'{% autoescape off %}{{ a|linebreaksbr }} {{ b|linebreaksbr }}{% endautoescape %}'})
def test_linebreaksbr02(self):
output = self.engine.render_to_string('linebreaksbr02', {"a": "x&\ny", "b": mark_safe("x&\ny")})
self.assertEqual(output, "x&<br />y x&<br />y")
class FunctionTests(SimpleTestCase):
def test_newline(self):
self.assertEqual(linebreaksbr('line 1\nline 2'), 'line 1<br />line 2')
def test_carriage(self):
self.assertEqual(linebreaksbr('line 1\rline 2'), 'line 1<br />line 2')
def test_carriage_newline(self):
self.assertEqual(linebreaksbr('line 1\r\nline 2'), 'line 1<br />line 2')
def test_non_string_input(self):
self.assertEqual(linebreaksbr(123), '123')
def test_autoescape(self):
self.assertEqual(
linebreaksbr('foo\n<a>bar</a>\nbuz'),
'foo<br /><a>bar</a><br />buz',
)
def test_autoescape_off(self):
self.assertEqual(
linebreaksbr('foo\n<a>bar</a>\nbuz', autoescape=False),
'foo<br /><a>bar</a><br />buz',
)
| bsd-3-clause |
zadgroup/edx-platform | common/djangoapps/xblock_django/migrations/0001_initial.py | 79 | 4946 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'XBlockDisableConfig'
db.create_table('xblock_django_xblockdisableconfig', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('change_date', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('changed_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, on_delete=models.PROTECT)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=False)),
('disabled_blocks', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
))
db.send_create_signal('xblock_django', ['XBlockDisableConfig'])
def backwards(self, orm):
# Deleting model 'XBlockDisableConfig'
db.delete_table('xblock_django_xblockdisableconfig')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'xblock_django.xblockdisableconfig': {
'Meta': {'ordering': "('-change_date',)", 'object_name': 'XBlockDisableConfig'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'disabled_blocks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
complete_apps = ['xblock_django'] | agpl-3.0 |
nandhp/youtube-dl | youtube_dl/extractor/tlc.py | 15 | 1578 | # encoding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .brightcove import BrightcoveLegacyIE
from ..compat import compat_parse_qs
class TlcDeIE(InfoExtractor):
IE_NAME = 'tlc.de'
_VALID_URL = r'https?://www\.tlc\.de/(?:[^/]+/)*videos/(?P<title>[^/?#]+)?(?:.*#(?P<id>\d+))?'
_TEST = {
'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001',
'info_dict': {
'id': '3235167922001',
'ext': 'mp4',
'title': 'Breaking Amish: Die Welt da draußen',
'description': (
'Vier Amische und eine Mennonitin wagen in New York'
' den Sprung in ein komplett anderes Leben. Begleitet sie auf'
' ihrem spannenden Weg.'),
'timestamp': 1396598084,
'upload_date': '20140404',
'uploader_id': '1659832546',
},
}
BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1659832546/default_default/index.html?videoId=%s'
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
brightcove_id = mobj.group('id')
if not brightcove_id:
title = mobj.group('title')
webpage = self._download_webpage(url, title)
brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage)
brightcove_id = compat_parse_qs(brightcove_legacy_url)['@videoPlayer'][0]
return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
| unlicense |
OMS-NetZero/FAIR | setup.py | 1 | 1405 | from setuptools import setup
from setuptools import find_packages
import versioneer
# README #
def readme():
with open('README.rst') as f:
return f.read()
AUTHORS = [
("Chris Smith", "c.j.smith1@leeds.ac.uk"),
("Richard Millar", "richard.millar@ouce.ox.ac.uk"),
("Zeb Nicholls", "zebedee.nicholls@climate-energy-college.org"),
("Myles Allen", "myles.allen@ouce.ox.ac.uk"),
]
setup(
name='fair',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description='Python package to perform calculations with the FaIR simple climate model',
long_description=readme(),
keywords='simple climate model temperature response carbon cycle emissions forcing',
url='https://github.com/OMS-NetZero/FAIR',
author=", ".join([author[0] for author in AUTHORS]),
author_email=", ".join([author[1] for author in AUTHORS]),
license='Apache 2.0',
packages=find_packages(exclude=['docs*']),
package_data={'': ['*.csv']},
python_requires='>=3.6, <4',
include_package_data=True,
install_requires=[
'matplotlib',
'numpy>=1.14.5',
'scipy>=0.19.0',
'pandas',
],
zip_safe=False,
extras_require={
'docs': ['sphinx>=1.4', 'nbsphinx'],
'dev' : ['notebook', 'scmdata>=0.7.1', 'wheel', 'twine'],
'test': ['pytest>=4.0', 'nbval', 'pytest-cov', 'codecov']
}
)
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/_common_serialization.py | 6 | 6340 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import ast
import sys
from datetime import datetime
from xml.sax.saxutils import escape as xml_escape
from ._common_models import (
WindowsAzureData,
HeaderDict,
_unicode_type,
)
# TODO: check if this is used
def _get_readable_id(id_name, id_prefix_to_skip):
"""simplified an id to be more friendly for us people"""
# id_name is in the form 'https://namespace.host.suffix/name'
# where name may contain a forward slash!
pos = id_name.find('//')
if pos != -1:
pos += 2
if id_prefix_to_skip:
pos = id_name.find(id_prefix_to_skip, pos)
if pos != -1:
pos += len(id_prefix_to_skip)
pos = id_name.find('/', pos)
if pos != -1:
return id_name[pos + 1:]
return id_name
def _create_entry(entry_body):
''' Adds common part of entry to a given entry body and return the whole
xml. '''
updated_str = datetime.utcnow().isoformat()
if datetime.utcnow().utcoffset() is None:
updated_str += '+00:00'
entry_start = '''<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<entry xmlns:d="http://schemas.microsoft.com/ado/2007/08/dataservices" xmlns:m="http://schemas.microsoft.com/ado/2007/08/dataservices/metadata" xmlns="http://www.w3.org/2005/Atom" >
<title /><updated>{updated}</updated><author><name /></author><id />
<content type="application/xml">
{body}</content></entry>'''
return entry_start.format(updated=updated_str, body=entry_body)
def _to_datetime(strtime):
return datetime.strptime(strtime, "%Y-%m-%dT%H:%M:%S.%f")
_KNOWN_SERIALIZATION_XFORMS = {
'last_modified': 'Last-Modified',
'cache_control': 'Cache-Control',
'account_admin_live_email_id': 'AccountAdminLiveEmailId',
'service_admin_live_email_id': 'ServiceAdminLiveEmailId',
'subscription_id': 'SubscriptionID',
'fqdn': 'FQDN',
'private_id': 'PrivateID',
'os_virtual_hard_disk': 'OSVirtualHardDisk',
'logical_disk_size_in_gb': 'LogicalDiskSizeInGB',
'logical_size_in_gb': 'LogicalSizeInGB',
'os': 'OS',
'persistent_vm_downtime_info': 'PersistentVMDowntimeInfo',
'copy_id': 'CopyId',
'os_state': 'OSState',
'vm_image': 'VMImage',
'vm_images': 'VMImages',
'os_disk_configuration': 'OSDiskConfiguration',
'public_ips': 'PublicIPs',
'public_ip': 'PublicIP',
'supported_os': 'SupportedOS',
'reserved_ip': 'ReservedIP',
'reserved_ips': 'ReservedIPs',
'aad_tenant_id': 'AADTenantID',
'start_ip_address': 'StartIPAddress',
'end_ip_address': 'EndIPAddress',
'operation_id': 'OperationId',
'operation_object_id': 'OperationObjectId',
'client_ip': 'ClientIP',
'status_id': 'ID',
'virtual_ips': 'VirtualIPs',
'virtual_ip': 'VirtualIP',
'recommended_vm_size': 'RecommendedVMSize'
}
def _get_serialization_name(element_name):
"""converts a Python name into a serializable name"""
known = _KNOWN_SERIALIZATION_XFORMS.get(element_name)
if known is not None:
return known
if element_name.startswith('x_ms_'):
return element_name.replace('_', '-')
if element_name.endswith('_id'):
element_name = element_name.replace('_id', 'ID')
for name in ['content_', 'last_modified', 'if_', 'cache_control']:
if element_name.startswith(name):
element_name = element_name.replace('_', '-_')
return ''.join(name.capitalize() for name in element_name.split('_'))
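# Illustrative conversions implied by the rules above:
#   _get_serialization_name('subscription_id') -> 'SubscriptionID' (known map)
#   _get_serialization_name('x_ms_version') -> 'x-ms-version'
#   _get_serialization_name('content_type') -> 'Content-Type'
#   _get_serialization_name('label') -> 'Label'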
# TODO: check if this is used
def _convert_class_to_xml(source, xml_prefix=True):
if source is None:
return ''
xmlstr = ''
if xml_prefix:
xmlstr = '<?xml version="1.0" encoding="utf-8"?>'
if isinstance(source, list):
for value in source:
xmlstr += _convert_class_to_xml(value, False)
elif isinstance(source, WindowsAzureData):
class_name = source.__class__.__name__
xmlstr += '<' + class_name + '>'
for name, value in vars(source).items():
if value is not None:
if isinstance(value, list) or \
isinstance(value, WindowsAzureData):
xmlstr += _convert_class_to_xml(value, False)
else:
xmlstr += ('<' + _get_serialization_name(name) + '>' +
xml_escape(str(value)) + '</' +
_get_serialization_name(name) + '>')
xmlstr += '</' + class_name + '>'
return xmlstr
def _set_continuation_from_response_headers(feeds, response):
x_ms_continuation = HeaderDict()
for name, value in response.headers:
if 'x-ms-continuation' in name:
x_ms_continuation[name[len('x-ms-continuation') + 1:]] = value
if x_ms_continuation:
setattr(feeds, 'x_ms_continuation', x_ms_continuation)
def _get_request_body(request_body):
'''Converts an object into a request body. If it's None
we'll return an empty string, if it's one of our objects it'll
convert it to XML and return it. Otherwise we just use the object
directly'''
if request_body is None:
return b''
# TODO: check if this is used
if isinstance(request_body, WindowsAzureData):
request_body = _convert_class_to_xml(request_body)
if isinstance(request_body, bytes):
return request_body
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
request_body = str(request_body)
if isinstance(request_body, _unicode_type):
return request_body.encode('utf-8')
return request_body
| mit |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/test/test_threadable.py | 25 | 3223 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import sys, pickle
try:
import threading
except ImportError:
threading = None
from twisted.trial import unittest
from twisted.python import threadable
from twisted.internet import defer, reactor
class TestObject:
synchronized = ['aMethod']
x = -1
y = 1
def aMethod(self):
for i in xrange(10):
self.x, self.y = self.y, self.x
self.z = self.x + self.y
assert self.z == 0, "z == %d, not 0 as expected" % (self.z,)
threadable.synchronize(TestObject)
class SynchronizationTestCase(unittest.TestCase):
def setUp(self):
"""
Reduce the CPython check interval so that thread switches happen much
more often, hopefully exercising more possible race conditions. Also,
delay actual test startup until the reactor has been started.
"""
if hasattr(sys, 'getcheckinterval'):
self.addCleanup(sys.setcheckinterval, sys.getcheckinterval())
sys.setcheckinterval(7)
# XXX This is a trial hack. We need to make sure the reactor
# actually *starts* for isInIOThread() to have a meaningful result.
# Returning a Deferred here should force that to happen, if it has
# not happened already. In the future, this should not be
# necessary.
d = defer.Deferred()
reactor.callLater(0, d.callback, None)
return d
def testIsInIOThread(self):
foreignResult = []
t = threading.Thread(target=lambda: foreignResult.append(threadable.isInIOThread()))
t.start()
t.join()
self.failIf(foreignResult[0], "Non-IO thread reported as IO thread")
self.failUnless(threadable.isInIOThread(), "IO thread reported as not IO thread")
def testThreadedSynchronization(self):
o = TestObject()
errors = []
def callMethodLots():
try:
for i in xrange(1000):
o.aMethod()
except AssertionError, e:
errors.append(str(e))
threads = []
for x in range(5):
t = threading.Thread(target=callMethodLots)
threads.append(t)
t.start()
for t in threads:
t.join()
if errors:
raise unittest.FailTest(errors)
def testUnthreadedSynchronization(self):
o = TestObject()
for i in xrange(1000):
o.aMethod()
class SerializationTestCase(unittest.TestCase):
def testPickling(self):
lock = threadable.XLock()
lockType = type(lock)
lockPickle = pickle.dumps(lock)
newLock = pickle.loads(lockPickle)
self.failUnless(isinstance(newLock, lockType))
def testUnpickling(self):
lockPickle = 'ctwisted.python.threadable\nunpickle_lock\np0\n(tp1\nRp2\n.'
lock = pickle.loads(lockPickle)
newPickle = pickle.dumps(lock, 2)
newLock = pickle.loads(newPickle)
if threading is None:
SynchronizationTestCase.testThreadedSynchronization.skip = "Platform lacks thread support"
SerializationTestCase.testPickling.skip = "Platform lacks thread support"
| gpl-2.0 |
eMarco/omap | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
Nexenta/s3-tests | virtualenv/lib/python2.7/site-packages/nose/plugins/collect.py | 96 | 3113 | """
This plugin bypasses the actual execution of tests, and instead just collects
test names. Fixtures are also bypassed, so running nosetests with the
collection plugin enabled should be very quick.
This plugin is useful in combination with the testid plugin (``--with-id``).
Run both together to get an indexed list of all tests, which will enable you to
run individual tests by index number.
This plugin is also useful for counting tests in a test suite, and making
people watching your demo think all of your tests pass.
"""
from nose.plugins.base import Plugin
from nose.case import Test
import logging
import unittest
log = logging.getLogger(__name__)
class CollectOnly(Plugin):
"""
Collect and output test names only, don't run any tests.
"""
name = "collect-only"
enableOpt = 'collect_only'
def options(self, parser, env):
"""Register commandline options.
"""
parser.add_option('--collect-only',
action='store_true',
dest=self.enableOpt,
default=env.get('NOSE_COLLECT_ONLY'),
help="Enable collect-only: %s [COLLECT_ONLY]" %
(self.help()))
def prepareTestLoader(self, loader):
"""Install collect-only suite class in TestLoader.
"""
# Disable context awareness
log.debug("Preparing test loader")
loader.suiteClass = TestSuiteFactory(self.conf)
def prepareTestCase(self, test):
"""Replace actual test with dummy that always passes.
"""
# Return something that always passes
log.debug("Preparing test case %s", test)
if not isinstance(test, Test):
return
def run(result):
# We need to make these plugin calls because there won't be
# a result proxy, due to using a stripped-down test suite
self.conf.plugins.startTest(test)
result.startTest(test)
self.conf.plugins.addSuccess(test)
result.addSuccess(test)
self.conf.plugins.stopTest(test)
result.stopTest(test)
return run
class TestSuiteFactory:
"""
Factory for producing configured test suites.
"""
def __init__(self, conf):
self.conf = conf
def __call__(self, tests=(), **kw):
return TestSuite(tests, conf=self.conf)
class TestSuite(unittest.TestSuite):
"""
Basic test suite that bypasses most proxy and plugin calls, but does
wrap tests in a nose.case.Test so prepareTestCase will be called.
"""
def __init__(self, tests=(), conf=None):
self.conf = conf
# Exec lazy suites: makes discovery depth-first
if callable(tests):
tests = tests()
log.debug("TestSuite(%r)", tests)
unittest.TestSuite.__init__(self, tests)
def addTest(self, test):
log.debug("Add test %s", test)
if isinstance(test, unittest.TestSuite):
self._tests.append(test)
else:
self._tests.append(Test(test, config=self.conf))
| mit |
parpg/parpg | parpg/dialoguecontroller.py | 1 | 2557 | # This file is part of PARPG.
# PARPG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# PARPG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with PARPG. If not, see <http://www.gnu.org/licenses/>.
from controllerbase import ControllerBase
class DialogueController(ControllerBase):
"""Controller that takes over when a dialogue is started"""
def __init__(self,
engine,
view,
model,
application):
"""
Constructor
@param engine: Instance of the active fife engine
@type engine: fife.Engine
@param view: Instance of a GameSceneView
@type view: parpg.GameSceneView
@param model: The model that has the current gamestate
@type model: parpg.GameModel
@param application: The application that created this controller
@type application: parpg.PARPGApplication
"""
super(DialogueController, self).__init__(engine,
view,
model,
application)
self.dialogue = None
self.view = view
def startTalk(self, npc):
if npc.dialogue is not None:
self.model.active_map.centerCameraOnPlayer()
npc.fifeagent.behaviour.talk(
self.model.game_state.\
getObjectById("PlayerCharacter").fifeagent
)
self.dialogue = self.view.hud.showDialogue(npc)
self.dialogue.initiateDialogue()
self.model.pause(True)
self.view.hud.enabled = False
def pump(self, dt):
ControllerBase.pump(self, dt)
if self.dialogue and not self.dialogue.active:
self.application.manager.pop_mode()
self.model.pause(False)
self.view.hud.enabled = True
| gpl-3.0 |
openstack/cloudbase-init | cloudbaseinit/conf/default.py | 4 | 15944 | # Copyright 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Config options available all across the project."""
from oslo_config import cfg
from cloudbaseinit.conf import base as conf_base
from cloudbaseinit import constant
class GlobalOptions(conf_base.Options):
"""Config options available all across the project."""
def __init__(self, config):
super(GlobalOptions, self).__init__(config, group="DEFAULT")
self._options = [
cfg.BoolOpt(
'allow_reboot', default=True,
help='Allows OS reboots requested by plugins'),
cfg.BoolOpt(
'stop_service_on_exit', default=True,
help='In case of execution as a service, specifies if the '
'service must be gracefully stopped before exiting'),
cfg.BoolOpt(
'check_latest_version', default=False,
help='Check if there is a newer version of cloudbase-init '
'available. If this option is activated, a log '
'message will be emitted if there is a newer version '
'available.'),
cfg.IntOpt(
'retry_count', default=5,
help='Max. number of attempts for fetching metadata in '
'case of transient errors'),
cfg.FloatOpt(
'retry_count_interval', default=4,
help='Interval between attempts in case of transient errors, '
'expressed in seconds'),
cfg.StrOpt(
'mtools_path', default=None,
help='Path to "mtools" program suite, used for interacting '
'with VFAT filesystems'),
cfg.StrOpt(
'bsdtar_path', default='bsdtar.exe',
help='Path to "bsdtar", used to extract ISO ConfigDrive '
'files'),
cfg.BoolOpt(
'netbios_host_name_compatibility', default=True,
help='Truncates the hostname to 15 characters for Netbios '
'compatibility'),
cfg.StrOpt(
'logging_serial_port_settings', default=None,
help='Serial port logging settings. Format: '
'"port,baudrate,parity,bytesize", e.g.: '
'"COM1,115200,N,8". Set to None (default) to disable.'),
cfg.BoolOpt(
'activate_windows', default=False,
help='Activates Windows automatically'),
cfg.BoolOpt(
'set_kms_product_key', default=False,
help='Sets the KMS product key for this operating system'),
cfg.BoolOpt(
'set_avma_product_key', default=False,
help='Sets the AVMA product key for this operating system'),
cfg.StrOpt(
'kms_host', default=None,
help='The KMS host address in form <host>[:<port>], '
'e.g: "kmshost:1688"'),
cfg.BoolOpt(
'log_licensing_info', default=True,
help='Logs the operating system licensing information'),
cfg.BoolOpt(
'winrm_enable_basic_auth', default=True,
help='Enables basic authentication for the WinRM '
'HTTPS listener'),
cfg.BoolOpt(
'winrm_configure_http_listener', default=False,
help='Configures the WinRM HTTP listener'),
cfg.BoolOpt(
'winrm_configure_https_listener', default=True,
help='Configures the WinRM HTTPS listener'),
cfg.ListOpt(
'volumes_to_extend', default=None,
help='List of volumes that need to be extended '
'if contiguous space is available on the disk. '
'By default all the available volumes can be extended. '
'Volumes must be specified using a comma separated list '
'of volume indexes, e.g.: "1,2"'),
cfg.StrOpt(
'san_policy', default=None,
choices=[constant.SAN_POLICY_ONLINE_STR,
constant.SAN_POLICY_OFFLINE_STR,
constant.SAN_POLICY_OFFLINE_SHARED_STR],
help='If not None, the SAN policy is set to the given value'),
cfg.StrOpt(
'local_scripts_path', default=None,
help='Path location containing scripts to be executed when '
'the plugin runs'),
cfg.BoolOpt(
'mtu_use_dhcp_config', default=True,
help='Configures the network interfaces MTU based on the '
'values provided via DHCP'),
cfg.StrOpt(
'username', default='Admin', help='User to be added to the '
'system or updated if already existing'),
cfg.ListOpt(
'groups', default=['Administrators'],
help='List of local groups to which the user specified in '
'"username" will be added'),
cfg.BoolOpt(
'rename_admin_user', default=False,
help='Renames the builtin admin user instead of creating a '
'new user'),
cfg.StrOpt(
'heat_config_dir', default='C:\\cfn',
help='The directory where the Heat configuration files must '
'be saved'),
cfg.BoolOpt(
'ntp_enable_service', default=True,
help='Enables the NTP client service'),
cfg.BoolOpt(
'ntp_use_dhcp_config', default=False,
help='Configures NTP client time synchronization using '
'the NTP servers provided via DHCP'),
cfg.BoolOpt(
'real_time_clock_utc', default=False,
help='Sets the real time clock to use universal time (True) '
'or local time (False)'),
cfg.BoolOpt(
'inject_user_password', default=True,
help='Set the password provided in the configuration. '
'If False or no password is provided, a random one '
'will be set'),
cfg.StrOpt(
'first_logon_behaviour',
default=constant.CLEAR_TEXT_INJECTED_ONLY,
choices=constant.LOGON_PASSWORD_CHANGE_OPTIONS,
help='Control the behaviour of what happens at '
'next logon. If this option is set to `always`, '
'then the user will be forced to change the password '
'at next logon. If it is set to '
'`clear_text_injected_only`, '
'then the user will have to change the password only if '
'the password is a clear text password, coming from the '
'metadata. The last option is `no`, when the user is '
'never forced to change the password.'),
cfg.ListOpt(
'metadata_services',
default=[
'cloudbaseinit.metadata.services.httpservice.HttpService',
'cloudbaseinit.metadata.services'
'.configdrive.ConfigDriveService',
'cloudbaseinit.metadata.services.ec2service.EC2Service',
'cloudbaseinit.metadata.services'
'.maasservice.MaaSHttpService',
'cloudbaseinit.metadata.services.cloudstack.CloudStack',
'cloudbaseinit.metadata.services'
'.opennebulaservice.OpenNebulaService',
],
help='List of enabled metadata service classes, '
'to be tested for availability in the provided order. '
'The first available service will be used to retrieve '
'metadata'),
cfg.ListOpt(
'plugins',
default=[
'cloudbaseinit.plugins.common.mtu.MTUPlugin',
'cloudbaseinit.plugins.windows.ntpclient'
'.NTPClientPlugin',
'cloudbaseinit.plugins.common.sethostname'
'.SetHostNamePlugin',
'cloudbaseinit.plugins.windows.createuser'
'.CreateUserPlugin',
'cloudbaseinit.plugins.common.networkconfig'
'.NetworkConfigPlugin',
'cloudbaseinit.plugins.windows.licensing'
'.WindowsLicensingPlugin',
'cloudbaseinit.plugins.common.sshpublickeys'
'.SetUserSSHPublicKeysPlugin',
'cloudbaseinit.plugins.windows.extendvolumes'
'.ExtendVolumesPlugin',
'cloudbaseinit.plugins.common.userdata.UserDataPlugin',
'cloudbaseinit.plugins.common.setuserpassword.'
'SetUserPasswordPlugin',
'cloudbaseinit.plugins.windows.winrmlistener.'
'ConfigWinRMListenerPlugin',
'cloudbaseinit.plugins.windows.winrmcertificateauth.'
'ConfigWinRMCertificateAuthPlugin',
'cloudbaseinit.plugins.common.localscripts'
'.LocalScriptsPlugin',
],
help='List of enabled plugin classes, '
'to be executed in the provided order'),
cfg.ListOpt(
'user_data_plugins',
default=[
'cloudbaseinit.plugins.common.userdataplugins.parthandler.'
'PartHandlerPlugin',
'cloudbaseinit.plugins.common.userdataplugins.cloudconfig.'
'CloudConfigPlugin',
'cloudbaseinit.plugins.common.userdataplugins'
'.cloudboothook.CloudBootHookPlugin',
'cloudbaseinit.plugins.common.userdataplugins.shellscript.'
'ShellScriptPlugin',
'cloudbaseinit.plugins.common.userdataplugins'
'.multipartmixed.MultipartMixedPlugin',
'cloudbaseinit.plugins.common.userdataplugins.heat.'
'HeatPlugin',
],
help='List of enabled userdata content plugins'),
cfg.ListOpt(
'cloud_config_plugins', default=[],
help='List which contains the name of the cloud config '
'plugins ordered by priority.'),
cfg.BoolOpt(
'rdp_set_keepalive', default=True,
help='Sets the RDP KeepAlive policy'),
cfg.StrOpt(
'bcd_boot_status_policy',
default=None,
choices=[constant.POLICY_IGNORE_ALL_FAILURES],
help='Sets the Windows BCD boot status policy'),
cfg.BoolOpt(
'bcd_enable_auto_recovery', default=False,
help='Enables or disables the BCD auto recovery'),
cfg.BoolOpt(
'set_unique_boot_disk_id', default=True,
help='Sets a new random unique id on the boot disk to avoid '
'collisions'),
cfg.IntOpt(
'display_idle_timeout', default=0,
help='The idle timeout, in seconds, before powering off '
'the display. Set 0 to leave the display always on'),
cfg.ListOpt(
'page_file_volume_labels', default=[],
help='Labels of volumes on which a Windows page file needs to '
'be created. E.g.: "Temporary Storage"'),
cfg.ListOpt(
'page_file_volume_mount_points', default=[],
help='Volume mount points on which a Windows page file needs '
'to be created. E.g.: '
'"\\\\?\\GLOBALROOT\\device\\Harddisk1\\Partition1\\"'),
cfg.BoolOpt(
'trim_enabled', default=False,
help='Enables or disables TRIM delete notifications for '
'the underlying storage device.'),
cfg.BoolOpt(
'process_userdata', default=True,
help='Processes the userdata content based on the type, e.g. '
'executing a PowerShell script'),
cfg.StrOpt(
'userdata_save_path',
default=None,
help='Copies the userdata to the given file path. The path '
'can include environment variables that will be expanded,'
' e.g. "%%SYSTEMDRIVE%%\\CloudbaseInit\\UserData.bin"'),
cfg.BoolOpt(
'enable_automatic_updates', default=None,
help='If set, enables or disables automatic operating '
'system updates.'),
cfg.BoolOpt(
'metadata_report_provisioning_started', default=False,
help='Reports to the metadata service that provisioning has '
'started'),
cfg.BoolOpt(
'metadata_report_provisioning_completed', default=False,
help='Reports to the metadata service that provisioning '
'completed successfully or failed'),
cfg.StrOpt(
'ephemeral_disk_volume_label', default=None,
help='Ephemeral disk volume label, e.g.: "Temporary Storage"'),
cfg.StrOpt(
'ephemeral_disk_volume_mount_point', default=None,
            help='Ephemeral disk volume mount point, e.g.: '
'"\\\\?\\GLOBALROOT\\device\\Harddisk1\\Partition1\\"'),
cfg.StrOpt(
'ephemeral_disk_data_loss_warning_path', default=None,
help='Ephemeral disk data loss warning path, relative to the '
'ephemeral disk volume path. E.g.: '
'DATALOSS_WARNING_README.txt'),
cfg.IntOpt(
'user_password_length', default=20,
help='The length of the generated password for the user '
'defined by the `username` config option.'),
]
self._cli_options = [
cfg.BoolOpt(
'reset_service_password', default=True,
help='If set to True, the service user password will be '
'reset at each execution with a new random value of '
'appropriate length and complexity, unless the user is '
                 'a built-in or domain account. '
'This is needed to avoid "pass the hash" attacks on '
'Windows cloned instances.'),
]
def register(self):
"""Register the current options to the global ConfigOpts object."""
self._config.register_cli_opts(self._cli_options)
self._config.register_opts(self._options + self._cli_options)
def list(self):
"""Return a list which contains all the available options."""
return self._options
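    # A minimal consumption sketch (hedged: the enclosing class and its
    # constructor are not shown in this excerpt, so the names below are
    # assumptions):
    #   CONF = cfg.CONF                  # the global oslo.config object
    #   opts = GlobalOptions(CONF)       # hypothetical class/constructor
    #   opts.register()
    #   print(CONF.retry_count)          # -> 5 unless overridden in the config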
| apache-2.0 |
goodhanrry/updaten_915s_to_lollipop | tools/perf/scripts/python/event_analyzing_sample.py | 4719 | 7393 | # event_analyzing_sample.py: general event handler in python
#
# Current perf report is already very powerful with the annotation integrated,
# and this script is not trying to be as powerful as perf report, but
# providing end user/developer a flexible way to analyze the events other
# than trace points.
#
# The 2 database related functions in this script just show how to gather
# the basic information, and users can modify and write their own functions
# according to their specific requirement.
#
# The first function "show_general_events" just does a basic grouping for all
# generic events with the help of sqlite, and the 2nd one "show_pebs_ll" is
# for an x86 HW PMU event: PEBS with load latency data.
#
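# A typical invocation (illustrative; the exact PEBS event name depends on
# the CPU and the perf build) could look like:
#
#   $ perf record -e cpu/mem-loads/pp -a -- sleep 10
#   $ perf script -s event_analyzing_sample.py
#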
import os
import sys
import math
import struct
import sqlite3
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from EventClass import *
#
# If the perf.data file contains a large number of samples, the insert
# operation will be very time consuming (about 10+ minutes for 10000
# samples) if the .db database is on disk. Moving the .db file to a
# RAM-based FS speeds up the handling and cuts the time down to several
# seconds.
#
con = sqlite3.connect("/dev/shm/perf.db")
con.isolation_level = None
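# If persistence between runs is not needed at all, an in-memory database
# avoids the filesystem entirely (alternative sketch, not the original code):
#   con = sqlite3.connect(":memory:")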
def trace_begin():
print "In trace_begin:\n"
#
# Create several tables at the start: pebs_ll is for PEBS data with
# load latency info, while gen_events is for general events.
#
con.execute("""
create table if not exists gen_events (
name text,
symbol text,
comm text,
dso text
);""")
con.execute("""
create table if not exists pebs_ll (
name text,
symbol text,
comm text,
dso text,
flags integer,
ip integer,
status integer,
dse integer,
dla integer,
lat integer
);""")
#
# Create an event object and insert it into the database so that the user
# can do further analysis with simple database commands.
#
def process_event(param_dict):
event_attr = param_dict["attr"]
sample = param_dict["sample"]
raw_buf = param_dict["raw_buf"]
comm = param_dict["comm"]
name = param_dict["ev_name"]
# Symbol and dso info are not always resolved
if (param_dict.has_key("dso")):
dso = param_dict["dso"]
else:
dso = "Unknown_dso"
if (param_dict.has_key("symbol")):
symbol = param_dict["symbol"]
else:
symbol = "Unknown_symbol"
    # Create the event object and insert it into the right table in the database
event = create_event(name, comm, dso, symbol, raw_buf)
insert_db(event)
def insert_db(event):
if event.ev_type == EVTYPE_GENERIC:
con.execute("insert into gen_events values(?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso))
elif event.ev_type == EVTYPE_PEBS_LL:
event.ip &= 0x7fffffffffffffff
event.dla &= 0x7fffffffffffffff
con.execute("insert into pebs_ll values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(event.name, event.symbol, event.comm, event.dso, event.flags,
event.ip, event.status, event.dse, event.dla, event.lat))
def trace_end():
print "In trace_end:\n"
# We show the basic info for the 2 type of event classes
show_general_events()
show_pebs_ll()
con.close()
#
# As the number of events may be very big, we can't show the histogram
# on a linear scale, so we use a log2 scale instead.
#
def num2sym(num):
# Each number will have at least one '#'
snum = '#' * (int)(math.log(num, 2) + 1)
return snum
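# Worked example: num2sym(8) returns '#' * int(log2(8) + 1) = '####',
# while num2sym(1) returns '#' * int(log2(1) + 1) = '#'.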
def show_general_events():
# Check the total record number in the table
count = con.execute("select count(*) from gen_events")
for t in count:
        print "There are %d records in the gen_events table" % t[0]
if t[0] == 0:
return
print "Statistics about the general events grouped by thread/symbol/dso: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from gen_events group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from gen_events group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dso
print "\n%40s %8s %16s\n%s" % ("dso", "number", "histogram", "="*74)
dsoq = con.execute("select dso, count(dso) from gen_events group by dso order by -count(dso)")
for row in dsoq:
print "%40s %8d %s" % (row[0], row[1], num2sym(row[1]))
#
# This function just shows the basic info, and we could do more with the
# data in the tables, like checking the function parameters when some
# big latency events happen.
#
def show_pebs_ll():
count = con.execute("select count(*) from pebs_ll")
for t in count:
        print "There are %d records in the pebs_ll table" % t[0]
if t[0] == 0:
return
print "Statistics about the PEBS Load Latency events grouped by thread/symbol/dse/latency: \n"
# Group by thread
commq = con.execute("select comm, count(comm) from pebs_ll group by comm order by -count(comm)")
print "\n%16s %8s %16s\n%s" % ("comm", "number", "histogram", "="*42)
for row in commq:
print "%16s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by symbol
print "\n%32s %8s %16s\n%s" % ("symbol", "number", "histogram", "="*58)
symbolq = con.execute("select symbol, count(symbol) from pebs_ll group by symbol order by -count(symbol)")
for row in symbolq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by dse
dseq = con.execute("select dse, count(dse) from pebs_ll group by dse order by -count(dse)")
print "\n%32s %8s %16s\n%s" % ("dse", "number", "histogram", "="*58)
for row in dseq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
# Group by latency
latq = con.execute("select lat, count(lat) from pebs_ll group by lat order by lat")
print "\n%32s %8s %16s\n%s" % ("latency", "number", "histogram", "="*58)
for row in latq:
print "%32s %8d %s" % (row[0], row[1], num2sym(row[1]))
def trace_unhandled(event_name, context, event_fields_dict):
print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
| gpl-2.0 |
ibelem/crosswalk-test-suite | webapi/tct-cors-w3c-tests/cors-py/support/cors_credential_false.py | 25 | 2284 | # Copyright (c) 2012 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this list
# of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Fan,Weiwei <weiwix.fan@intel.com>
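# This file appears to follow the wptserve handler convention used by the
# W3C CORS test servers: the server calls main(request, response), applies
# the headers set below and uses the returned string as the response body.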
def main(request, response):
response.headers.update([("Access-Control-Allow-Origin", request.headers["host"]),
("Access-Control-Max-Age", 30000),
("Access-Control-Allow-Credentials", "false"),
("Access-Control-Allow-Headers", "MyHeader"),
("Access-Control-Allow-Methods", "CustomMethod"),
(
"Access-Control-Expose-Headers",
"Access-Control-Max-Age, Access-Control-Allow-Origin, Server, Access-Control-Allow-Headers, Access-Control-Allow-Methods"),
("Cache-Control", "no-cache"),
("Pragma", "no-cache"),
("Content-Type", "text/plain")
])
return "HelloWorld"
| bsd-3-clause |
biospi/seamass-windeps | src/boost_1_57_0/tools/boostbook/test/more/run-tests.py | 38 | 3883 | #!/usr/bin/env python
# Copyright 2010 Daniel James.
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
"""Boostbook tests
Usage: python run-tests.py [--generate-gold]
"""
import difflib, getopt, os, re, sys
import lxml.ElementInclude
from lxml import etree
from collections import defaultdict
# Globals
def usage_and_exit():
print __doc__
sys.exit(2)
def main(argv):
script_directory = os.path.dirname(sys.argv[0])
boostbook_directory = os.path.join(script_directory, "../../xsl")
try:
opts, args = getopt.getopt(argv, "",
["generate-gold"])
if(len(args)): usage_and_exit()
except getopt.GetoptError:
usage_and_exit()
generate_gold = False
for opt, arg in opts:
if opt == '--generate-gold':
generate_gold = True
# Walk the test directory
parser = etree.XMLParser()
try:
boostbook_xsl = etree.XSLT(
etree.parse(os.path.join(boostbook_directory, "docbook.xsl"), parser)
)
except lxml.etree.XMLSyntaxError, error:
print "Error parsing boostbook xsl:"
print error
sys.exit(1)
for root, dirs, files in os.walk(os.path.join(script_directory, 'tests')):
for filename in files:
(base, ext) = os.path.splitext(filename)
if (ext == '.xml'):
src_path = os.path.join(root, filename)
gold_path = os.path.join(root, base + '.gold')
try:
doc_text = run_boostbook(parser, boostbook_xsl, src_path)
except:
# TODO: Need better error reporting here:
print "Error running boostbook for " + src_path
continue
if (generate_gold):
file = open(gold_path, 'w')
try:
file.write(doc_text)
finally: file.close()
else:
file = open(gold_path, 'r')
try:
gold_text = file.read()
finally:
file.close()
compare_xml(src_path, doc_text, gold_text)
def run_boostbook(parser, boostbook_xsl, file):
doc = boostbook_xsl(etree.parse(file, parser))
normalize_boostbook_ids(doc)
return etree.tostring(doc)
def normalize_boostbook_ids(doc):
ids = {}
id_bases = defaultdict(int)
for node in doc.xpath("//*[starts-with(@id, 'id') or contains(@id, '_id')]"):
id = node.get('id')
if(id in ids):
print 'Duplicate id: ' + id
match = re.match("(.+_id|id)([mp]?\d+)((?:-bb)?)", id)
if(match):
# Truncate id name, as it sometimes has different lengths...
match2 = re.match("(.*?)([^.]*?)(_?id)", match.group(1))
base = match2.group(1) + match2.group(2)[:14] + match2.group(3)
count = id_bases[base] + 1
id_bases[base] = count
ids[id] = base + str(count) + match.group(3)
for node in doc.xpath("//*[@linkend or @id]"):
x = node.get('linkend')
if(x in ids): node.set('linkend', ids[x])
x = node.get('id')
if(x in ids): node.set('id', ids[x])
def compare_xml(file, doc_text, gold_text):
# Had hoped to use xmldiff but it turned out to be a pain to install.
# So instead just do a text diff.
if (doc_text != gold_text):
print "Error: " + file
print
sys.stdout.writelines(
difflib.unified_diff(
gold_text.splitlines(True),
doc_text.splitlines(True)
)
)
print
print
if __name__ == "__main__":
main(sys.argv[1:])
| apache-2.0 |
bguillot/OpenUpgrade | addons/auth_openid/controllers/__init__.py | 443 | 1033 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import main
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jpautom/scikit-learn | sklearn/utils/tests/test_shortest_path.py | 303 | 2841 | from collections import defaultdict
import numpy as np
from numpy.testing import assert_array_almost_equal
from sklearn.utils.graph import (graph_shortest_path,
single_source_shortest_path_length)
def floyd_warshall_slow(graph, directed=False):
N = graph.shape[0]
#set nonzero entries to infinity
graph[np.where(graph == 0)] = np.inf
#set diagonal to zero
graph.flat[::N + 1] = 0
if not directed:
graph = np.minimum(graph, graph.T)
for k in range(N):
for i in range(N):
for j in range(N):
graph[i, j] = min(graph[i, j], graph[i, k] + graph[k, j])
graph[np.where(np.isinf(graph))] = 0
return graph
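# Reference note: the function above is the classic O(N^3) Floyd-Warshall
# relaxation, dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j]) over
# every intermediate node k. It is deliberately simple and slow, serving as
# ground truth for the implementations under test below.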
def generate_graph(N=20):
#sparse grid of distances
rng = np.random.RandomState(0)
dist_matrix = rng.random_sample((N, N))
#make symmetric: distances are not direction-dependent
dist_matrix = dist_matrix + dist_matrix.T
#make graph sparse
i = (rng.randint(N, size=N * N // 2), rng.randint(N, size=N * N // 2))
dist_matrix[i] = 0
#set diagonal to zero
dist_matrix.flat[::N + 1] = 0
return dist_matrix
def test_floyd_warshall():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_FW = graph_shortest_path(dist_matrix, directed, 'FW')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_FW, graph_py)
def test_dijkstra():
dist_matrix = generate_graph(20)
for directed in (True, False):
graph_D = graph_shortest_path(dist_matrix, directed, 'D')
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
assert_array_almost_equal(graph_D, graph_py)
def test_shortest_path():
dist_matrix = generate_graph(20)
# We compare path length and not costs (-> set distances to 0 or 1)
dist_matrix[dist_matrix != 0] = 1
for directed in (True, False):
if not directed:
dist_matrix = np.minimum(dist_matrix, dist_matrix.T)
graph_py = floyd_warshall_slow(dist_matrix.copy(), directed)
for i in range(dist_matrix.shape[0]):
# Non-reachable nodes have distance 0 in graph_py
dist_dict = defaultdict(int)
dist_dict.update(single_source_shortest_path_length(dist_matrix,
i))
for j in range(graph_py[i].shape[0]):
assert_array_almost_equal(dist_dict[j], graph_py[i, j])
def test_dijkstra_bug_fix():
X = np.array([[0., 0., 4.],
[1., 0., 2.],
[0., 5., 0.]])
dist_FW = graph_shortest_path(X, directed=False, method='FW')
dist_D = graph_shortest_path(X, directed=False, method='D')
assert_array_almost_equal(dist_D, dist_FW)
| bsd-3-clause |
Thoshh/wapad | lib/python2.7/site-packages/django/contrib/staticfiles/management/commands/collectstatic.py | 24 | 14373 | from __future__ import unicode_literals
import os
from collections import OrderedDict
from django.contrib.staticfiles.finders import get_finders
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.storage import FileSystemStorage
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.utils.encoding import smart_text
from django.utils.six.moves import input
class Command(BaseCommand):
"""
    Command that copies or symlinks static files from different
    locations to settings.STATIC_ROOT.
"""
help = "Collect static files in a single location."
requires_system_checks = False
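    # Illustrative invocations (the flags are defined in add_arguments below):
    #   python manage.py collectstatic --noinput
    #   python manage.py collectstatic --clear --dry-run
    #   python manage.py collectstatic -i "*.scss" -i "*.map"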
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
self.copied_files = []
self.symlinked_files = []
self.unmodified_files = []
self.post_processed_files = []
self.storage = staticfiles_storage
self.style = no_style()
try:
self.storage.path('')
except NotImplementedError:
self.local = False
else:
self.local = True
def add_arguments(self, parser):
parser.add_argument('--noinput', '--no-input',
action='store_false', dest='interactive', default=True,
help="Do NOT prompt the user for input of any kind.")
parser.add_argument('--no-post-process',
action='store_false', dest='post_process', default=True,
help="Do NOT post process collected files.")
parser.add_argument('-i', '--ignore', action='append', default=[],
dest='ignore_patterns', metavar='PATTERN',
help="Ignore files or directories matching this glob-style "
"pattern. Use multiple times to ignore more.")
parser.add_argument('-n', '--dry-run',
action='store_true', dest='dry_run', default=False,
help="Do everything except modify the filesystem.")
parser.add_argument('-c', '--clear',
action='store_true', dest='clear', default=False,
help="Clear the existing files using the storage "
"before trying to copy or link the original file.")
parser.add_argument('-l', '--link',
action='store_true', dest='link', default=False,
help="Create a symbolic link to each file instead of copying.")
parser.add_argument('--no-default-ignore', action='store_false',
dest='use_default_ignore_patterns', default=True,
help="Don't ignore the common private glob-style patterns 'CVS', "
"'.*' and '*~'.")
def set_options(self, **options):
"""
Set instance variables based on an options dict
"""
self.interactive = options['interactive']
self.verbosity = options['verbosity']
self.symlink = options['link']
self.clear = options['clear']
self.dry_run = options['dry_run']
ignore_patterns = options['ignore_patterns']
if options['use_default_ignore_patterns']:
ignore_patterns += ['CVS', '.*', '*~']
self.ignore_patterns = list(set(ignore_patterns))
self.post_process = options['post_process']
def collect(self):
"""
Perform the bulk of the work of collectstatic.
Split off from handle() to facilitate testing.
"""
if self.symlink and not self.local:
raise CommandError("Can't symlink to a remote destination.")
if self.clear:
self.clear_dir('')
if self.symlink:
handler = self.link_file
else:
handler = self.copy_file
found_files = OrderedDict()
for finder in get_finders():
for path, storage in finder.list(self.ignore_patterns):
# Prefix the relative path if the source storage contains it
if getattr(storage, 'prefix', None):
prefixed_path = os.path.join(storage.prefix, path)
else:
prefixed_path = path
if prefixed_path not in found_files:
found_files[prefixed_path] = (storage, path)
handler(path, prefixed_path, storage)
else:
self.log(
"Found another file with the destination path '%s'. It "
"will be ignored since only the first encountered file "
"is collected. If this is not what you want, make sure "
"every static file has a unique path." % prefixed_path,
level=1,
)
# Here we check if the storage backend has a post_process
# method and pass it the list of modified files.
if self.post_process and hasattr(self.storage, 'post_process'):
processor = self.storage.post_process(found_files,
dry_run=self.dry_run)
for original_path, processed_path, processed in processor:
if isinstance(processed, Exception):
self.stderr.write("Post-processing '%s' failed!" % original_path)
# Add a blank line before the traceback, otherwise it's
# too easy to miss the relevant part of the error message.
self.stderr.write("")
raise processed
if processed:
self.log("Post-processed '%s' as '%s'" %
(original_path, processed_path), level=1)
self.post_processed_files.append(original_path)
else:
self.log("Skipped post-processing '%s'" % original_path)
return {
'modified': self.copied_files + self.symlinked_files,
'unmodified': self.unmodified_files,
'post_processed': self.post_processed_files,
}
def handle(self, **options):
self.set_options(**options)
message = ['\n']
if self.dry_run:
message.append(
'You have activated the --dry-run option so no files will be modified.\n\n'
)
message.append(
'You have requested to collect static files at the destination\n'
'location as specified in your settings'
)
if self.is_local_storage() and self.storage.location:
destination_path = self.storage.location
message.append(':\n\n %s\n\n' % destination_path)
else:
destination_path = None
message.append('.\n\n')
if self.clear:
message.append('This will DELETE ALL FILES in this location!\n')
else:
message.append('This will overwrite existing files!\n')
message.append(
'Are you sure you want to do this?\n\n'
"Type 'yes' to continue, or 'no' to cancel: "
)
if self.interactive and input(''.join(message)) != 'yes':
raise CommandError("Collecting static files cancelled.")
collected = self.collect()
modified_count = len(collected['modified'])
unmodified_count = len(collected['unmodified'])
post_processed_count = len(collected['post_processed'])
if self.verbosity >= 1:
template = ("\n%(modified_count)s %(identifier)s %(action)s"
"%(destination)s%(unmodified)s%(post_processed)s.\n")
summary = template % {
'modified_count': modified_count,
'identifier': 'static file' + ('' if modified_count == 1 else 's'),
'action': 'symlinked' if self.symlink else 'copied',
'destination': (" to '%s'" % destination_path if destination_path else ''),
'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''),
'post_processed': (collected['post_processed'] and
', %s post-processed'
% post_processed_count or ''),
}
self.stdout.write(summary)
def log(self, msg, level=2):
"""
Small log helper
"""
if self.verbosity >= level:
self.stdout.write(msg)
def is_local_storage(self):
return isinstance(self.storage, FileSystemStorage)
def clear_dir(self, path):
"""
Deletes the given relative path using the destination storage backend.
"""
if not self.storage.exists(path):
return
dirs, files = self.storage.listdir(path)
for f in files:
fpath = os.path.join(path, f)
if self.dry_run:
self.log("Pretending to delete '%s'" %
smart_text(fpath), level=1)
else:
self.log("Deleting '%s'" % smart_text(fpath), level=1)
try:
full_path = self.storage.path(fpath)
except NotImplementedError:
self.storage.delete(fpath)
else:
if not os.path.exists(full_path) and os.path.lexists(full_path):
# Delete broken symlinks
os.unlink(full_path)
else:
self.storage.delete(fpath)
for d in dirs:
self.clear_dir(os.path.join(path, d))
def delete_file(self, path, prefixed_path, source_storage):
"""
Checks if the target file should be deleted if it already exists
"""
if self.storage.exists(prefixed_path):
try:
# When was the target file modified last time?
target_last_modified = \
self.storage.modified_time(prefixed_path)
except (OSError, NotImplementedError, AttributeError):
# The storage doesn't support ``modified_time`` or failed
pass
else:
try:
# When was the source file modified last time?
source_last_modified = source_storage.modified_time(path)
except (OSError, NotImplementedError, AttributeError):
pass
else:
# The full path of the target file
if self.local:
full_path = self.storage.path(prefixed_path)
else:
full_path = None
# Skip the file if the source file is younger
# Avoid sub-second precision (see #14665, #19540)
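                    # (Some storage backends and filesystems only report
                    # timestamps at whole-second resolution; comparing with
                    # microseconds included could make unchanged files appear
                    # modified on every run.)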
if (target_last_modified.replace(microsecond=0)
>= source_last_modified.replace(microsecond=0)):
if not ((self.symlink and full_path
and not os.path.islink(full_path)) or
(not self.symlink and full_path
and os.path.islink(full_path))):
if prefixed_path not in self.unmodified_files:
self.unmodified_files.append(prefixed_path)
self.log("Skipping '%s' (not modified)" % path)
return False
# Then delete the existing file if really needed
if self.dry_run:
self.log("Pretending to delete '%s'" % path)
else:
self.log("Deleting '%s'" % path)
self.storage.delete(prefixed_path)
return True
def link_file(self, path, prefixed_path, source_storage):
"""
Attempt to link ``path``
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.symlinked_files:
return self.log("Skipping '%s' (already linked earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally link the file
if self.dry_run:
self.log("Pretending to link '%s'" % source_path, level=1)
else:
self.log("Linking '%s'" % source_path, level=1)
full_path = self.storage.path(prefixed_path)
try:
os.makedirs(os.path.dirname(full_path))
except OSError:
pass
try:
if os.path.lexists(full_path):
os.unlink(full_path)
os.symlink(source_path, full_path)
except AttributeError:
import platform
raise CommandError("Symlinking is not supported by Python %s." %
platform.python_version())
except NotImplementedError:
import platform
raise CommandError("Symlinking is not supported in this "
"platform (%s)." % platform.platform())
except OSError as e:
raise CommandError(e)
if prefixed_path not in self.symlinked_files:
self.symlinked_files.append(prefixed_path)
def copy_file(self, path, prefixed_path, source_storage):
"""
Attempt to copy ``path`` with storage
"""
# Skip this file if it was already copied earlier
if prefixed_path in self.copied_files:
return self.log("Skipping '%s' (already copied earlier)" % path)
# Delete the target file if needed or break
if not self.delete_file(path, prefixed_path, source_storage):
return
# The full path of the source file
source_path = source_storage.path(path)
# Finally start copying
if self.dry_run:
self.log("Pretending to copy '%s'" % source_path, level=1)
else:
self.log("Copying '%s'" % source_path, level=1)
with source_storage.open(path) as source_file:
self.storage.save(prefixed_path, source_file)
self.copied_files.append(prefixed_path)
| mit |
jasbro1/PDMProject | node_modules/node-gyp/gyp/pylib/gyp/input_test.py | 1841 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
| mit |
vibhorag/scikit-learn | doc/tutorial/machine_learning_map/svg2imagemap.py | 360 | 3411 | #!/usr/local/bin/python
"""
This script converts a subset of SVG into an HTML imagemap
Note *subset*. It only handles <path> elements, for which it only pays
attention to the M and L commands. Further, it only notices the "translate"
transform.
It was written to generate the examples in the documentation for maphilight,
and thus is very squarely aimed at handling several SVG maps from wikipedia.
It *assumes* that all the <path>s it will need are inside a <g>. Any <path>
outside of a <g> will be ignored.
It takes several possible arguments, in the form:
    $ svg2imagemap.py FILENAME [x y [group1 group2 ... groupN]]
FILENAME must be the name of an SVG file. All other arguments are optional.
x and y, if present, are the dimensions of the image you'll be creating from
the SVG. If not present, it assumes the values of the width and height
attributes in the SVG file.
group1 through groupN are group ids. If you only want particular groups used,
enter their ids here and all others will be ignored.
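Illustrative run (file and group names are made up):
    $ python svg2imagemap.py blank_world_map.svg 800 480 europe asia
This writes blank_world_map.html next to the input file.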
"""
import os
import re
import sys
import xml.dom.minidom
import parse_path
if len(sys.argv) == 1:
    sys.exit("svg2imagemap.py FILENAME [x y [group1 group2 ... groupN]]")
if not os.path.exists(sys.argv[1]):
sys.exit("Input file does not exist")
x, y, groups = None, None, None
if len(sys.argv) >= 3:
x = float(sys.argv[2])
y = float(sys.argv[3])
if len(sys.argv) > 3:
groups = sys.argv[4:]
svg_file = xml.dom.minidom.parse(sys.argv[1])
svg = svg_file.getElementsByTagName('svg')[0]
raw_width = float(svg.getAttribute('width'))
raw_height = float(svg.getAttribute('height'))
width_ratio = x and (x / raw_width) or 1
height_ratio = y and (y / raw_height) or 1
if groups:
elements = [g for g in svg.getElementsByTagName('g') if (g.hasAttribute('id') and g.getAttribute('id') in groups)]
elements.extend([p for p in svg.getElementsByTagName('path') if (p.hasAttribute('id') and p.getAttribute('id') in groups)])
else:
elements = svg.getElementsByTagName('g')
parsed_groups = {}
for e in elements:
paths = []
if e.nodeName == 'g':
for path in e.getElementsByTagName('path'):
points = parse_path.get_points(path.getAttribute('d'))
for pointset in points:
paths.append([path.getAttribute('id'), pointset])
else:
points = parse_path.get_points(e.getAttribute('d'))
for pointset in points:
paths.append([e.getAttribute('id'), pointset])
if e.hasAttribute('transform'):
print e.getAttribute('id'), e.getAttribute('transform')
        for transform in re.findall(r'(\w+)\((-?\d+\.?\d*),(-?\d+\.?\d*)\)', e.getAttribute('transform')):
if transform[0] == 'translate':
x_shift = float(transform[1])
y_shift = float(transform[2])
for path in paths:
path[1] = [(p[0] + x_shift, p[1] + y_shift) for p in path[1]]
parsed_groups[e.getAttribute('id')] = paths
out = []
for g in parsed_groups:
for path in parsed_groups[g]:
out.append('<area href="#" title="%s" shape="poly" coords="%s"></area>' %
(path[0], ', '.join([("%d,%d" % (p[0]*width_ratio, p[1]*height_ratio)) for p in path[1]])))
outfile = open(sys.argv[1].replace('.svg', '.html'), 'w')
outfile.write('\n'.join(out))
| bsd-3-clause |
loco-odoo/localizacion_co | openerp/addons-extra/odoo-pruebas/odoo-server/addons/mass_mailing/models/mass_mailing_stats.py | 91 | 5053 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp.osv import osv, fields
class MailMailStats(osv.Model):
""" MailMailStats models the statistics collected about emails. Those statistics
    are stored in a separate model and table to avoid bloating the mail_mail table
    with statistics values. This also allows deleting emails sent with mass mailing
    without losing the statistics about them. """
_name = 'mail.mail.statistics'
_description = 'Email Statistics'
_rec_name = 'message_id'
_order = 'message_id'
_columns = {
'mail_mail_id': fields.many2one('mail.mail', 'Mail', ondelete='set null'),
'mail_mail_id_int': fields.integer(
'Mail ID (tech)',
            help='ID of the related mail_mail. This field is an integer field because '
                 'the related mail_mail can be deleted separately from its statistics. '
                 'However the ID is needed for several actions and controllers.'
),
'message_id': fields.char('Message-ID'),
'model': fields.char('Document model'),
'res_id': fields.integer('Document ID'),
# campaign / wave data
'mass_mailing_id': fields.many2one(
'mail.mass_mailing', 'Mass Mailing',
ondelete='set null',
),
'mass_mailing_campaign_id': fields.related(
'mass_mailing_id', 'mass_mailing_campaign_id',
type='many2one', ondelete='set null',
relation='mail.mass_mailing.campaign',
string='Mass Mailing Campaign',
store=True, readonly=True,
),
# Bounce and tracking
'scheduled': fields.datetime('Scheduled', help='Date when the email has been created'),
'sent': fields.datetime('Sent', help='Date when the email has been sent'),
'exception': fields.datetime('Exception', help='Date of technical error leading to the email not being sent'),
'opened': fields.datetime('Opened', help='Date when the email has been opened the first time'),
'replied': fields.datetime('Replied', help='Date when this email has been replied for the first time.'),
'bounced': fields.datetime('Bounced', help='Date when this email has bounced.'),
}
_defaults = {
'scheduled': fields.datetime.now,
}
def create(self, cr, uid, values, context=None):
if 'mail_mail_id' in values:
values['mail_mail_id_int'] = values['mail_mail_id']
res = super(MailMailStats, self).create(cr, uid, values, context=context)
return res
def _get_ids(self, cr, uid, ids=None, mail_mail_ids=None, mail_message_ids=None, domain=None, context=None):
if not ids and mail_mail_ids:
base_domain = [('mail_mail_id_int', 'in', mail_mail_ids)]
elif not ids and mail_message_ids:
base_domain = [('message_id', 'in', mail_message_ids)]
else:
base_domain = [('id', 'in', ids or [])]
if domain:
base_domain = ['&'] + domain + base_domain
return self.search(cr, uid, base_domain, context=context)
def set_opened(self, cr, uid, ids=None, mail_mail_ids=None, mail_message_ids=None, context=None):
stat_ids = self._get_ids(cr, uid, ids, mail_mail_ids, mail_message_ids, [('opened', '=', False)], context)
self.write(cr, uid, stat_ids, {'opened': fields.datetime.now()}, context=context)
return stat_ids
def set_replied(self, cr, uid, ids=None, mail_mail_ids=None, mail_message_ids=None, context=None):
stat_ids = self._get_ids(cr, uid, ids, mail_mail_ids, mail_message_ids, [('replied', '=', False)], context)
self.write(cr, uid, stat_ids, {'replied': fields.datetime.now()}, context=context)
return stat_ids
def set_bounced(self, cr, uid, ids=None, mail_mail_ids=None, mail_message_ids=None, context=None):
stat_ids = self._get_ids(cr, uid, ids, mail_mail_ids, mail_message_ids, [('bounced', '=', False)], context)
self.write(cr, uid, stat_ids, {'bounced': fields.datetime.now()}, context=context)
return stat_ids
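    # Usage sketch (hypothetical mail id), e.g. from email tracking code:
    #   stats_obj = self.pool['mail.mail.statistics']   # registry lookup
    #   stats_obj.set_opened(cr, uid, mail_mail_ids=[42], context=context)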
| agpl-3.0 |