| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
stvstnfrd/edx-platform | openedx/core/djangoapps/xblock/apps.py | 9 | 3611 | """
Django app configuration for the XBlock Runtime django app
"""
from django.apps import AppConfig, apps
from django.conf import settings
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
class XBlockAppConfig(AppConfig):
"""
Django app configuration for the new XBlock Runtime django app
"""
name = 'openedx.core.djangoapps.xblock'
verbose_name = 'New XBlock Runtime'
label = 'xblock_new' # The name 'xblock' is already taken by ORA2's 'openassessment.xblock' app :/
def get_runtime_system_params(self):
"""
Get the XBlockRuntimeSystem parameters appropriate for viewing and/or
editing XBlock content.
"""
raise NotImplementedError
def get_site_root_url(self):
"""
Get the absolute root URL to this site, e.g. 'https://courses.example.com'
Should not have any trailing slash.
"""
raise NotImplementedError
def get_learning_context_params(self):
"""
Get additional kwargs that are passed to learning context implementations
(LearningContext subclass constructors). For example, this can be used to
specify that the course learning context should load the course's list of
blocks from the _draft_ version of the course in studio, but from the
published version of the course in the LMS.
"""
return {}
class LmsXBlockAppConfig(XBlockAppConfig):
"""
LMS-specific configuration of the XBlock Runtime django app.
"""
def get_runtime_system_params(self):
"""
Get the XBlockRuntimeSystem parameters appropriate for viewing and/or
editing XBlock content in the LMS
"""
return dict(
student_data_mode='persisted',
)
def get_site_root_url(self):
"""
Get the absolute root URL to this site, e.g. 'https://courses.example.com'
Should not have any trailing slash.
"""
return configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL)
class StudioXBlockAppConfig(XBlockAppConfig):
"""
Studio-specific configuration of the XBlock Runtime django app.
"""
BLOCKSTORE_DRAFT_NAME = "studio_draft"
def get_runtime_system_params(self):
"""
Get the XBlockRuntimeSystem parameters appropriate for viewing and/or
editing XBlock content in Studio
"""
return dict(
student_data_mode='ephemeral',
)
def get_site_root_url(self):
"""
Get the absolute root URL to this site, e.g. 'https://studio.example.com'
Should not have any trailing slash.
"""
scheme = "https" if settings.HTTPS == "on" else "http"
return scheme + '://' + settings.CMS_BASE
# or for the LMS version: configuration_helpers.get_value('LMS_ROOT_URL', settings.LMS_ROOT_URL)
def get_learning_context_params(self):
"""
Get additional kwargs that are passed to learning context implementations
(LearningContext subclass constructors). For example, this can be used to
specify that the course learning context should load the course's list of
blocks from the _draft_ version of the course in studio, but from the
published version of the course in the LMS.
"""
return {
"use_draft": self.BLOCKSTORE_DRAFT_NAME,
}
def get_xblock_app_config():
"""
Get whichever of the above AppConfig subclasses is active.
"""
return apps.get_app_config(XBlockAppConfig.label)
| agpl-3.0 |
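A minimal sketch of the dispatch pattern used in the file above, with Django stripped out so it runs standalone (the class names here are illustrative, not part of edx-platform; in Django the active config is found by `apps.get_app_config(label)`):

```python
# Plain-Python sketch: one base config, per-service overrides.
class BaseAppConfig:
    label = "xblock_new"

    def get_runtime_system_params(self):
        raise NotImplementedError


class LmsConfig(BaseAppConfig):
    def get_runtime_system_params(self):
        return {"student_data_mode": "persisted"}   # real learner state is saved


class StudioConfig(BaseAppConfig):
    def get_runtime_system_params(self):
        return {"student_data_mode": "ephemeral"}   # authoring previews are throwaway


active = StudioConfig()  # stand-in for apps.get_app_config("xblock_new")
assert active.get_runtime_system_params() == {"student_data_mode": "ephemeral"}
```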
embray/numpy | numpy/lib/npyio.py | 1 | 66490 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from ._compiled_base import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource']
def seek_gzip_factory(f):
"""Use this factory to produce the class so that we can do a lazy
import on gzip.
"""
import gzip
class GzipFile(gzip.GzipFile):
def seek(self, offset, whence=0):
# figure out new position (we can only seek forwards)
if whence == 1:
offset = self.offset + offset
if whence not in [0, 1]:
raise IOError("Illegal argument")
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
def tell(self):
return self.offset
if isinstance(f, str):
f = GzipFile(f)
elif isinstance(f, gzip.GzipFile):
# cast to our GzipFile if its already a gzip.GzipFile
try:
name = f.name
except AttributeError:
# Backward compatibility for <= 2.5
name = f.filename
mode = f.mode
f = GzipFile(fileobj=f.fileobj, filename=name)
f.mode = mode
return f
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension;
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. Compressed files with the filename extension
``.gz`` are acceptable. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
elif isinstance(file, gzip.GzipFile):
fid = seek_gzip_factory(file)
else:
fid = file
try:
        # Code to distinguish NumPy binary files from pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid)
else:
# Try a pickle
try:
return pickle.load(fid)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
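# How `load` sniffs the file type above: the first bytes decide the branch --
#   b'PK\x03\x04' (zip local-file header)  -> opened as an .npz archive
#   b'\x93NUMPY'  (format.MAGIC_PREFIX)    -> a single .npy array
#   anything else                          -> falls through to pickle.load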
def save(file, arr):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see `format`.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
    file : str
        File name of the ``.npz`` file. The ``.npz`` extension will be
        appended to the file name if it is not already there.
    args : Arguments, optional
        Arrays to save to the file. They are stored under the names
        ``arr_0``, ``arr_1``, and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file, stored under the keyword names.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val))
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return float
elif issubclass(typ, np.complex):
return complex
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
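# Illustrative behavior of _getconv (example values assumed, not from the
# original source):
#   _getconv(np.dtype(bool))('0')    -> False   (via bool(int(x)))
#   _getconv(np.dtype('i4'))('3.0')  -> 3       (via int(float(x)))
#   _getconv(np.dtype('f8'))('2.5')  -> 2.5     (plain float)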
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
record data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str, optional
The character used to indicate the start of a comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a record
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
Examples
--------
>>> from StringIO import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
comments = asbytes(comments)
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
fh = iter(seek_gzip_factory(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
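    # Worked example of the two helpers above (kept as comments so the module
    # stays importable; values are illustrative, not from the original source):
    #   dt = np.dtype([('a', 'f8'), ('b', 'f8', (2,))])
    #   flatten_dtype(dt)  ->  ([float64, float64, float64],
    #                           [(1, None), (2, list)])
    #   pack_items(['1', '2', '3'], [(1, None), (2, list)])  ->  ('1', ['2', '3'])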
def split_line(line):
"""Chop off comments, strip, and split at delimiter."""
line = asbytes(line).split(comments)[0].strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if not ndmin in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
Character separating columns.
    newline : str, optional
        String or character separating lines.
        .. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
        .. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
fh.write(asbytes(format % tuple(row) + newline))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
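# Illustrative `fmt` dispatch in savetxt above (a 2-column real X assumed):
#   fmt='%d'            -> single specifier, repeated per column: '%d %d'
#   fmt=['%d', '%.2f']  -> list of per-column specifiers, joined by `delimiter`
#   fmt='x=%d y=%.2f'   -> multi-format string, one '%' per column, used as-is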
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skiprows=0, skip_header=0, skip_footer=0, converters=None,
missing='', missing_values=None, filling_values=None,
usecols=None, names=None,
excludelist=None, deletechars=None, replace_space='_',
autostrip=False, case_sensitive=True, defaultfmt="f%i",
unpack=None, usemask=False, loose=True, invalid_raise=True):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded.
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was deprecated in numpy 1.5, and will be removed in
        numpy 2.0. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was deprecated in numpy 1.5, and will be removed in
numpy 2.0. Please use `missing_values` instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
        for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from StringIO import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing, unicode):
missing = asbytes(missing)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Get the first valid lines after the first skiprows ones ..
if skiprows:
warnings.warn(
"The use of `skiprows` is deprecated, it will be removed in "
"numpy 2.0.\nPlease use `skip_header` instead.",
DeprecationWarning)
skip_header = skiprows
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = asbytes('').join(first_line.split(comments)[1:])
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the deprecated `missing`
if missing != asbytes(''):
warnings.warn(
"The use of `missing` is deprecated, it will be removed in "
"Numpy 2.0.\nPlease use `missing_values` instead.",
DeprecationWarning)
values = [str(_) for _ in missing.split(asbytes(","))]
for entry in missing_values:
entry.extend(values)
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values or []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (i, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(i):
try:
i = names.index(i)
except ValueError:
continue
elif usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
# Find the value to test:
if len(first_line):
testing_value = first_values[i]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
# Select only the columns we need
if usecols:
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values, missing_values)]))
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
#
# if loose:
# conversionfuncs = [conv._loose_call for conv in converters]
# else:
# conversionfuncs = [conv._strict_call for conv in converters]
# for (i, vals) in enumerate(rows):
# rows[i] = tuple([convert(val)
# for (convert, val) in zip(conversionfuncs, vals)])
if loose:
rows = list(zip(*[[converter._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
else:
rows = list(zip(*[[converter._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, converter) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for (i, ttype) in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.update(dtype=kwargs.get('dtype', None))
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
case_sensitive = kwargs.get('case_sensitive', "lower") or "lower"
names = kwargs.get('names', True)
if names is None:
names = True
    kwargs.update(dtype=kwargs.get('dtype', None),
delimiter=kwargs.get('delimiter', ",") or ",",
names=names,
case_sensitive=case_sensitive)
usemask = kwargs.get("usemask", False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-3-clause |
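A quick round trip through the `savez`/`load`/`NpzFile` machinery documented above; this sketch uses only the public NumPy API shown in that file, with an in-memory buffer standing in for a real file:

```python
import io

import numpy as np

buf = io.BytesIO()
np.savez(buf, x=np.arange(3), y=np.ones(2))  # writes a zip of .npy members
buf.seek(0)

with np.load(buf) as npz:      # NpzFile implements the context manager protocol
    print(sorted(npz.files))   # ['x', 'y']  -- member names minus '.npy'
    print(npz['x'])            # [0 1 2]     -- lazily read via format.read_array
```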
xuanyuanking/spark | python/pyspark/rddsampler.py | 157 | 4250 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import random
import math
class RDDSamplerBase(object):
def __init__(self, withReplacement, seed=None):
self._seed = seed if seed is not None else random.randint(0, sys.maxsize)
self._withReplacement = withReplacement
self._random = None
def initRandomGenerator(self, split):
self._random = random.Random(self._seed ^ split)
# mixing because the initial seeds are close to each other
for _ in range(10):
self._random.randint(0, 1)
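        # (a few discarded draws decorrelate the streams: nearby split ids
        # give nearby seeds via self._seed ^ split, so the generators'
        # first outputs would otherwise be correlated)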
def getUniformSample(self):
return self._random.random()
def getPoissonSample(self, mean):
# Using Knuth's algorithm described in
# http://en.wikipedia.org/wiki/Poisson_distribution
if mean < 20.0:
# one exp and k+1 random calls
l = math.exp(-mean)
p = self._random.random()
k = 0
while p > l:
k += 1
p *= self._random.random()
else:
# switch to the log domain, k+1 expovariate (random + log) calls
p = self._random.expovariate(mean)
k = 0
while p < 1.0:
k += 1
p += self._random.expovariate(mean)
return k
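    # Illustrative check (assumed usage; not part of the original module):
    # both branches return a non-negative int whose expectation is ~mean, e.g.
    #   sampler = RDDSamplerBase(withReplacement=True)
    #   sampler.initRandomGenerator(0)
    #   draws = [sampler.getPoissonSample(2.0) for _ in range(10000)]
    #   sum(draws) / 10000.0  # close to 2.0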
def func(self, split, iterator):
raise NotImplementedError
class RDDSampler(RDDSamplerBase):
def __init__(self, withReplacement, fraction, seed=None):
RDDSamplerBase.__init__(self, withReplacement, seed)
self._fraction = fraction
def func(self, split, iterator):
self.initRandomGenerator(split)
if self._withReplacement:
for obj in iterator:
# For large datasets, the expected number of occurrences of each element in
# a sample with replacement is Poisson(frac). We use that to get a count for
# each element.
count = self.getPoissonSample(self._fraction)
for _ in range(0, count):
yield obj
else:
for obj in iterator:
if self.getUniformSample() < self._fraction:
yield obj
class RDDRangeSampler(RDDSamplerBase):
def __init__(self, lowerBound, upperBound, seed=None):
RDDSamplerBase.__init__(self, False, seed)
self._lowerBound = lowerBound
self._upperBound = upperBound
def func(self, split, iterator):
self.initRandomGenerator(split)
for obj in iterator:
if self._lowerBound <= self.getUniformSample() < self._upperBound:
yield obj
class RDDStratifiedSampler(RDDSamplerBase):
def __init__(self, withReplacement, fractions, seed=None):
RDDSamplerBase.__init__(self, withReplacement, seed)
self._fractions = fractions
def func(self, split, iterator):
self.initRandomGenerator(split)
if self._withReplacement:
for key, val in iterator:
# For large datasets, the expected number of occurrences of each element in
# a sample with replacement is Poisson(frac). We use that to get a count for
# each element.
count = self.getPoissonSample(self._fractions[key])
for _ in range(0, count):
yield key, val
else:
for key, val in iterator:
if self.getUniformSample() < self._fractions[key]:
yield key, val
| apache-2.0 |
akaariai/django | django/db/backends/oracle/introspection.py | 517 | 11463 | import cx_Oracle
from django.db.backends.base.introspection import (
BaseDatabaseIntrospection, FieldInfo, TableInfo,
)
from django.utils.encoding import force_text
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type objects to Django Field types.
data_types_reverse = {
cx_Oracle.BLOB: 'BinaryField',
cx_Oracle.CLOB: 'TextField',
cx_Oracle.DATETIME: 'DateField',
cx_Oracle.FIXED_CHAR: 'CharField',
cx_Oracle.NCLOB: 'TextField',
cx_Oracle.NUMBER: 'DecimalField',
cx_Oracle.STRING: 'CharField',
cx_Oracle.TIMESTAMP: 'DateTimeField',
}
try:
data_types_reverse[cx_Oracle.NATIVE_FLOAT] = 'FloatField'
except AttributeError:
pass
try:
data_types_reverse[cx_Oracle.UNICODE] = 'CharField'
except AttributeError:
pass
cache_bust_counter = 1
def get_field_type(self, data_type, description):
# If it's a NUMBER with scale == 0, consider it an IntegerField
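        # Full NUMBER mapping implemented below, given (precision, scale):
        #   scale == 0 and precision > 11 -> BigIntegerField
        #   scale == 0 and precision == 1 -> BooleanField
        #   scale == 0 otherwise          -> IntegerField
        #   scale == -127                 -> FloatField (Oracle stores FLOAT
        #                                   with this sentinel scale)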
if data_type == cx_Oracle.NUMBER:
precision, scale = description[4:6]
if scale == 0:
if precision > 11:
return 'BigIntegerField'
elif precision == 1:
return 'BooleanField'
else:
return 'IntegerField'
elif scale == -127:
return 'FloatField'
return super(DatabaseIntrospection, self).get_field_type(data_type, description)
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
"SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
self.cache_bust_counter += 1
cursor.execute("SELECT * FROM {} WHERE ROWNUM < 2 AND {} > 0".format(
self.connection.ops.quote_name(table_name),
self.cache_bust_counter))
description = []
for desc in cursor.description:
name = force_text(desc[0]) # cx_Oracle always returns a 'str' on both Python 2 and 3
name = name % {} # cx_Oracle, for some reason, doubles percent signs.
description.append(FieldInfo(*(name.lower(),) + desc[1:]))
return description
def table_name_converter(self, name):
"Table name comparison is case insensitive under Oracle"
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return {d[0]: i for i, d in enumerate(self.get_table_description(cursor, table_name))}
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
table_name = table_name.upper()
cursor.execute("""
SELECT ta.column_name, tb.table_name, tb.column_name
FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,
user_tab_cols ta, user_tab_cols tb
WHERE user_constraints.table_name = %s AND
ta.table_name = user_constraints.table_name AND
ta.column_name = ca.column_name AND
ca.table_name = ta.table_name AND
user_constraints.constraint_name = ca.constraint_name AND
user_constraints.r_constraint_name = cb.constraint_name AND
cb.table_name = tb.table_name AND
cb.column_name = tb.column_name AND
ca.position = cb.position""", [table_name])
relations = {}
for row in cursor.fetchall():
relations[row[0].lower()] = (row[2].lower(), row[1].lower())
return relations
def get_key_columns(self, cursor, table_name):
cursor.execute("""
SELECT ccol.column_name, rcol.table_name AS referenced_table, rcol.column_name AS referenced_column
FROM user_constraints c
JOIN user_cons_columns ccol
ON ccol.constraint_name = c.constraint_name
JOIN user_cons_columns rcol
ON rcol.constraint_name = c.r_constraint_name
WHERE c.table_name = %s AND c.constraint_type = 'R'""", [table_name.upper()])
return [tuple(cell.lower() for cell in row)
for row in cursor.fetchall()]
def get_indexes(self, cursor, table_name):
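        # The `(+)` markers in this query are Oracle's legacy outer-join
        # syntax; the NOT EXISTS subquery keeps only single-column indexes
        # (any index that also has a column at position 2 is skipped).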
sql = """
SELECT LOWER(uic1.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1 ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1 ELSE 0
END AS is_unique
FROM user_constraints, user_indexes, user_ind_columns uic1
WHERE user_constraints.constraint_type (+) = 'P'
AND user_constraints.index_name (+) = uic1.index_name
AND user_indexes.uniqueness (+) = 'UNIQUE'
AND user_indexes.index_name (+) = uic1.index_name
AND uic1.table_name = UPPER(%s)
AND uic1.column_position = 1
AND NOT EXISTS (
SELECT 1
FROM user_ind_columns uic2
WHERE uic2.index_name = uic1.index_name
AND uic2.column_position = 2
)
"""
cursor.execute(sql, [table_name])
indexes = {}
for row in cursor.fetchall():
indexes[row[0]] = {'primary_key': bool(row[1]),
'unique': bool(row[2])}
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs and uniques
cursor.execute("""
SELECT
user_constraints.constraint_name,
LOWER(cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint
FROM
user_constraints
INNER JOIN
user_indexes ON user_indexes.index_name = user_constraints.index_name
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
(
user_constraints.constraint_type = 'P' OR
user_constraints.constraint_type = 'U'
)
AND user_constraints.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, pk, unique, check in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": pk,
"unique": unique,
"foreign_key": None,
"check": check,
"index": True, # All P and U come with index, see inner join above
}
# Record the details
constraints[constraint]['columns'].append(column)
# Check constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name
FROM
user_constraints cons
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'C' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Foreign key constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name,
LOWER(rcons.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, other_table, other_column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": (other_table, other_column),
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
index_name,
LOWER(column_name)
FROM
user_ind_columns cols
WHERE
table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE cols.index_name = cons.index_name
)
ORDER BY cols.column_position
""", [table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": False,
"index": True,
}
# Record the details
constraints[constraint]['columns'].append(column)
return constraints
| bsd-3-clause |
ramanala/PACE | pacersmexplorer.py | 1 | 23886 | #!/usr/bin/env python
#Copyright (c) 2016 Ramnatthan Alagappan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
import os
import subprocess
import cProfile
import Queue
import threading
import time
import pprint
import code
import sys
import collections
import gc
from _paceutils import *
from pace import DSReplayer
from pacedefaultfs import defaultfs, defaultnet
import itertools
import pickle
from collections import defaultdict
import math
class MultiThreadedChecker(threading.Thread):
queue = Queue.Queue()
outputs = {}
def __init__(self, queue, thread_id='0'):
threading.Thread.__init__(self)
self.queue = MultiThreadedChecker.queue
self.thread_id = str(thread_id)
def __threaded_check(self, base_path, dirnames, client_stdout, crashid):
assert type(paceconfig(0).checker_tool) in [list, str, tuple]
dirname_param = ''
for dirname in dirnames.values():
dirname_param += str(dirname) + str('@')
args = [paceconfig(0).checker_tool, dirname_param, base_path, client_stdout, self.thread_id]
retcode = subprocess.call(args)
MultiThreadedChecker.outputs[crashid] = retcode
def run(self):
while True:
task = self.queue.get()
self.__threaded_check(*task)
self.queue.task_done()
@staticmethod
def check_later(base_path, dirnames, client_stdout, retcodeid):
MultiThreadedChecker.queue.put((base_path, dirnames, client_stdout, retcodeid))
@staticmethod
def reset():
assert MultiThreadedChecker.queue.empty()
MultiThreadedChecker.outputs = {}
@staticmethod
def wait_and_get_outputs():
MultiThreadedChecker.queue.join()
return MultiThreadedChecker.outputs
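# Typical flow (inferred from the callers below): start a few
# MultiThreadedChecker daemon threads, enqueue one check_later() call per
# crash state to test, then call wait_and_get_outputs() to drain the queue
# and collect the {crash id: checker exit code} map.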
def get_crash_point_id_string(crash_point):
toret = ""
for i in range(0, len(crash_point)):
c = crash_point[i]
if c == -1:
c = 'z' # the node has not done any persistent state update
if i < len(crash_point)-1:
toret += str(c) + "-"
else:
toret += str(c)
return toret
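# e.g. get_crash_point_id_string((3, -1, 5)) -> '3-z-5', where 'z' marks a
# machine that has not performed any persistent state update yet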
def dict_value_product(dicts):
return (dict(zip(dicts, x)) for x in itertools.product(*dicts.itervalues()))
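# e.g. dict_value_product({'a': [1, 2], 'b': [7]}) yields {'a': 1, 'b': 7}
# and then {'a': 2, 'b': 7} -- the cartesian product over the value lists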
def atleast_one_present(machines, currs, ends):
for m in machines:
if currs[m] < len(ends[m]):
return True
return False
def replay_dir_base_name_RO(crash_point, omit_pt):
assert type(omit_pt) == dict
base_name = get_crash_point_id_string(crash_point)
base_name += "_RO"
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key])
return toret
base_name += "_OM" + dict_string(omit_pt)
return base_name
def replay_dir_base_name_ARO(crash_point, omit_pt):
assert type(omit_pt) == dict
base_name = get_crash_point_id_string(crash_point)
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key][1])
return toret
base_name += "_ARO" + dict_string(omit_pt)
return base_name
def replay_dir_base_name_AP(crash_point, end_pt):
assert type(end_pt) == dict
base_name = get_crash_point_id_string(crash_point)
def dict_string(d):
toret = ''
for key in d:
toret += '_' + str(key) + '=' + str(d[key])
return toret
base_name += "_AP" + dict_string(end_pt)
return base_name
def append_or_trunc_ops(replayer, machines, crash_point):
toret = {}
for machine in machines:
curr_op = replayer.micro_ops[machine][crash_point[machine]].op
toret[machine] = curr_op == 'append' or curr_op == 'trunc'
return toret
def nCr(n,r):
func = math.factorial
return func(n) / func(r) / func(n-r)
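# e.g. nCr(3, 2) == 3; used below to count the majority subsets of servers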
def get_replay_dirs(machines, base_name):
dirnames = {}
base_path = os.path.join(paceconfig(0).scratchpad_dir, base_name)
for machine in machines:
os.system('rm -rf ' + base_path)
os.system('mkdir -p ' + base_path)
dirnames[machine] = os.path.join(base_path , 'rdir-' + str(machine))
stdout_files = {}
for machine_id in dirnames.keys():
stdout_files[machine_id] = os.path.join(base_path, str(machine_id) + '.input_stdout')
return (base_path, dirnames,stdout_files)
def unique_grp(grps, machines, filter_machines):
assert len(machines) > 0 and len(filter_machines) < len(machines)
to_ret = []
to_ret_set = set()
temp = {}
max_for_state = defaultdict(lambda:-1, temp)
for state in grps:
state_arr = list(state)
for machine in machines:
if machine not in filter_machines:
val = state_arr[machine]
del state_arr[machine]
if tuple(state_arr) not in max_for_state.keys():
max_for_state[tuple(state_arr)] = val
else:
if max_for_state[tuple(state_arr)] < val:
max_for_state[tuple(state_arr)] = val
state_arr.insert(machine, max_for_state[tuple(state_arr)])
to_ret_set.add(tuple(state_arr))
return to_ret_set
def check_logically_same(to_omit_list):
ops_eq = all(x.op == to_omit_list[0].op for x in to_omit_list)
if ops_eq:
name_checking_ops = ['write', 'append', 'creat', 'trunc', 'unlink']
if to_omit_list[0].op in name_checking_ops:
name_eq = all(os.path.basename(x.name) == os.path.basename(to_omit_list[0].name) for x in to_omit_list)
return ops_eq and name_eq
elif to_omit_list[0].op == 'rename':
dest_eq = all(os.path.basename(x.dest) == os.path.basename(to_omit_list[0].dest) for x in to_omit_list)
src_eq = all(os.path.basename(x.source) == os.path.basename(to_omit_list[0].source) for x in to_omit_list)
return ops_eq and dest_eq and src_eq
else:
for omit in to_omit_list:
if 'fsync' in str(omit):
return False
assert False
else:
return False
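# In short: omissions across machines are "logically the same" only when they
# are the same operation on the same basename (or the same rename
# source/destination pair); mixed op types never match, and fsyncs are
# deliberately rejected.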
def compute_reachable_global_prefixes(replayer):
print 'Computing globally reachable prefix states'
assert paceconfig(0).cached_prefix_states_file is not None and len(paceconfig(0).cached_prefix_states_file) > 0
prefix_cached_file = paceconfig(0).cached_prefix_states_file
interesting_prefix_states = []
final_reachable_prefix_fsync_deps = set()
if not os.path.isfile(prefix_cached_file):
print 'No cached file. Computing reachable prefixes from scratch.'
base_lists = replayer.ops_indexes().values()
list0 = base_lists[0]
list1 = base_lists[1]
interesting_prefix_states = []
# Algorithm to find all consistent cuts of persistent states:
# Naive method: Let us say there are 3 machines. Consider that the number of events
# in these traces from three machines as <n1, n2, n3>. So, there are n1 X n2 X n3
# ways in which these traces could combine.
# Should we check for everything?
# No, we can do better; intuition: if i X j is not consistent then any superset of
# it <i, j , k> for any k is inconsistent.
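        # For example, with two machines, if the cut (2, 3) is inconsistent,
        # then no extension (2, 3, k) is ever tested below for any k, which
        # prunes most of the n1 x n2 x n3 combinations.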
for index1 in list0:
for index2 in list1:
if replayer.is_legal_gp((index1, index2)):
interesting_prefix_states.append((index1, index2))
for i in range(2, len(base_lists)):
interesting_prefix_cache = []
for index in base_lists[i]:
for inter in interesting_prefix_states:
to_check = inter + (index, )
if replayer.is_legal_gp(to_check):
interesting_prefix_cache.append(to_check)
interesting_prefix_states = interesting_prefix_cache
for state in interesting_prefix_states:
index = 0
candidate = []
for point in state:
candidate.append(replayer.persistent_op_index(index, point))
index += 1
candidate = tuple(candidate)
final_reachable_prefix_fsync_deps.add(candidate)
with open(prefix_cached_file, "w") as f:
pickle.dump(final_reachable_prefix_fsync_deps, f, protocol = 0)
else:
print 'Using cached globally reachable states'
with open(prefix_cached_file, "r") as f:
final_reachable_prefix_fsync_deps = pickle.load(f)
final_reachable_prefix_no_deps = set(list(final_reachable_prefix_fsync_deps)[:])
assert not bool(final_reachable_prefix_no_deps.symmetric_difference(final_reachable_prefix_fsync_deps))
# We are mostly done here. But there is one more optimization that we could do.
# if a trace ends with fsync or fdatasync, then it can be skipped for replay
# because there is no specific operation that we need to replay fsyncs. However,
# they are important to calculate FS reordering dependencies. So, we maintain
# two sets: one with fsync deps (we will use when we apply FS reordering),
# one with no fsync deps that we will use to replay globally reachable prefixes
interesting_states_check = set(list(final_reachable_prefix_fsync_deps)[:])
for state in interesting_states_check:
machine = 0
for end_point in state:
if replayer.micro_ops[machine][end_point].op == 'fsync' or replayer.micro_ops[machine][end_point].op == 'fdatasync' or\
replayer.micro_ops[machine][end_point].op == 'file_sync_range':
prev_point = replayer.get_prev_op(state)
# if subsumed by another GRP, just remove this. If not subsumed, leave it
if prev_point in interesting_states_check:
final_reachable_prefix_no_deps.remove(state)
break
machine += 1
assert final_reachable_prefix_fsync_deps is not None and len(final_reachable_prefix_fsync_deps) > 0
assert final_reachable_prefix_no_deps is not None and len(final_reachable_prefix_no_deps) > 0
assert final_reachable_prefix_no_deps <= final_reachable_prefix_fsync_deps
return (final_reachable_prefix_fsync_deps, final_reachable_prefix_no_deps)
def replay_correlated_global_prefix(replayer, interesting_prefix_states, replay = True):
print 'Checking prefix crash states...'
machines = replayer.conceptual_machines()
replay_start = time.time()
count = 0
for crash_point in interesting_prefix_states:
assert len(crash_point) == len(machines)
base_name = get_crash_point_id_string(crash_point)
base_name += "_GRP"
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], get_crash_point_id_string(crash_point))
count += 1
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Prefix states : ' + str(count)
print 'Prefix replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_atomicity_prefix(replayer, interesting_prefix_states, client_index, replay = True):
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
majority_count = int(len(server_machines) / 2) + 1
assert server_count == 3 and majority_count == 2
count = 0
how_many_majorities = 1
pick_server_count = majority_count
replay_start = time.time()
replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps = False)
apm_imposed_subset_machineset = list(itertools.combinations(server_machines, pick_server_count))
assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
apm_imposed_subset_machineset = apm_imposed_subset_machineset[0:how_many_majorities]
assert len(apm_imposed_subset_machineset) == 1
apm_imposed_machines = apm_imposed_subset_machineset[0]
for machine in machines:
replayer.load(machine, 0)
for crash_point in interesting_prefix_states:
atomic_ends = {}
atomic_currs = {}
machine = 0
for end_point in crash_point:
if machine in apm_imposed_machines:
atomic_ends[machine] = range(0, replayer.iops_len(machine, end_point))
atomic_currs[machine] = 0
machine += 1
atomic_end_list = []
while atleast_one_present(apm_imposed_machines, atomic_currs, atomic_ends):
atomic_end = {}
for machine in apm_imposed_machines:
if atomic_currs[machine] < len(atomic_ends[machine]):
atomic_end[machine] = atomic_ends[machine][atomic_currs[machine]]
else:
atomic_end[machine] = atomic_ends[machine][len(atomic_ends[machine])-1]
atomic_currs[machine] += 1
atomic_end_list.append(atomic_end)
for atomic_end in atomic_end_list:
for machine in server_machines:
if machine in apm_imposed_machines:
replayer.iops_end_at(machine, (crash_point[machine], atomic_end[machine]))
else:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
replayer.iops_end_at(client_index, (crash_point[client_index], replayer.iops_len(client_index, crash_point[client_index]) - 1))
base_name = replay_dir_base_name_AP(crash_point, atomic_end)
count += 1
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Atomicity Prefix correlated states : ' + str(count)
print 'Atomicity Prefix correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_reordering(replayer, interesting_prefix_states, client_index, replay = True):
def end_highest_so_far(machine, curr_endpoint):
machine_dict = can_omit_for_machine_endpoint[machine]
maximum = -1
for key in machine_dict.keys():
if key > maximum and key <= curr_endpoint:
maximum = key
return maximum
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
can_omit_ops = {}
can_omit_for_machine_endpoint = {}
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
majority_count = int(len(server_machines) / 2) + 1
# For now assert for 3 and 2 :)
assert server_count == 3 and majority_count == 2
for machine in machines:
can_omit_ops[machine] = defaultdict(list)
for machine in machines:
can_omit_for_machine_endpoint[machine] = defaultdict(list)
replay_start = time.time()
for machine in machines:
replayer.load(machine, 0)
# Phase 1: See what all ops can be dropped for each end point in a machine
# For example, let's say the GRP is (x, y, z). For x in machine0, there can
# be multiple ops that are before x and can still be dropped when we end at x
# For example, consider the follwing:
# x-2: creat(file)
# x-1: write(foo)
# x : write(bar)
# In the above trace, it is legal to drop creat when the machine crashes at x.
# In this phase, we will find all such points that can be dropped for each x.
for crash_point in interesting_prefix_states:
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
machine_id = 0
for end_point in crash_point:
can_end_highest = end_highest_so_far(machine_id, end_point)
if can_end_highest == -1:
omit_ops = [fs_op for fs_op in fs_ops[machine_id] if fs_op > -1 and fs_op < end_point]
else:
omit_ops1 = can_omit_for_machine_endpoint[machine_id][can_end_highest]
omit_ops2 = [fs_op for fs_op in fs_ops[machine_id] if fs_op >= can_end_highest and fs_op > -1 and fs_op < end_point]
omit_ops = omit_ops1 + omit_ops2
can_omit_temp = []
omit_ops_temp = []
for i in omit_ops:
replayer.mops_omit(machine_id, i)
if replayer.is_legal_reordering(machine_id):
can_omit_temp.append(i)
omit_ops_temp.append(i)
replayer.mops_include(machine_id, i)
can_omit_for_machine_endpoint[machine_id][end_point] = omit_ops_temp
can_omit_ops[machine_id][end_point] = can_omit_temp
machine_id += 1
# Phase 2: Using the points collected in phase 1, we can now see what points can be dropped across machines
# For example, for (x, y, z), if the drop dictionary looks like {x:[0, 2, 4], y:[1], z : [5, 7]}
# then we have 3*1*2 ways of dropping. Notice that we dont need to check if this is valid reordering
# It *has* to be valid state as the local drop points have been checked for this condition.
reordering_count = 0
pick_server_count = -1
how_many_majorities = 1
pick_server_count = majority_count
apm_imposed_subset_machineset = list(itertools.combinations(server_machines, pick_server_count))
assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
apm_imposed_subset_machineset = apm_imposed_subset_machineset[0:how_many_majorities]
for apm_imposed_machines in apm_imposed_subset_machineset:
for crash_point in interesting_prefix_states:
omittables = {}
for machine in machines:
replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
for machine in apm_imposed_machines:
if machine != client_index:
omittables[machine] = can_omit_ops[machine][crash_point[machine]]
for omit_pt in list(dict_value_product(omittables)):
to_omit_list = []
for mac in omit_pt.keys():
curr_omit = omit_pt[mac]
to_omit_list.append(replayer.micro_ops[mac][curr_omit])
if check_logically_same(to_omit_list):
reordering_count += 1
replayer.mops_omit_group(omit_pt)
base_name = replay_dir_base_name_RO(crash_point, omit_pt)
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
replayer.mops_include_group(omit_pt)
del omittables
omittables = None
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Reordering correlated states : ' + str(reordering_count)
print 'Reordering correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def replay_correlated_atomicity_reordering(replayer, interesting_prefix_states, client_index, replay = True):
machines = replayer.conceptual_machines()
fs_ops = replayer.fs_ops_indexes()
can_omit_ops = {}
server_machines = machines[:]
server_machines.remove(client_index)
server_count = len(server_machines)
majority_count = int(len(server_machines) / 2) + 1
assert server_count == 3 and majority_count == 2
atomicity_reordering_count = 0
pick_server_count = majority_count
how_many_majorities = 1
replay_start = time.time()
replayer.set_environment(defaultfs('count', 3), defaultnet(), load_cross_deps = False)
apm_imposed_subset_machineset = list(itertools.combinations(server_machines, pick_server_count))
assert len(apm_imposed_subset_machineset) == nCr(server_count, majority_count)
apm_imposed_subset_machineset = apm_imposed_subset_machineset[0:how_many_majorities]
for machine in machines:
replayer.load(machine, 0)
for apm_imposed_machines in apm_imposed_subset_machineset:
for crash_point in interesting_prefix_states:
append_trunc_indexes = append_or_trunc_ops(replayer, server_machines, crash_point)
if any(append_trunc_indexes.values()):
                # First, end all machines at the GRP point
                for machine in machines:
                    replayer.iops_end_at(machine, (crash_point[machine], replayer.iops_len(machine, crash_point[machine]) - 1))
# Next we have to omit the sub (io or disk) ops as we call it
atomic_omits = {}
atomic_ro_currs = {}
machine = 0
for end_point in crash_point:
atomic_ro_currs[machine] = 0
if machine in apm_imposed_machines:
if append_trunc_indexes[machine]:
# If it is an append or trunc, break it into pieces and see for its absence
atomic_omits[machine] = range(0, replayer.iops_len(machine, end_point))
else:
# if not append, just put a marker. We will exclude this marker later
atomic_omits[machine] = [str(replayer.iops_len(machine, end_point)-1)]
                    machine += 1
atomic_omit_list = []
while atleast_one_present(apm_imposed_machines, atomic_ro_currs, atomic_omits):
atomic_omit = {}
for machine in apm_imposed_machines:
if atomic_ro_currs[machine] < len(atomic_omits[machine]):
atomic_omit[machine] = atomic_omits[machine][atomic_ro_currs[machine]]
else:
atomic_omit[machine] = None
atomic_ro_currs[machine] += 1
atomic_omit_list.append(atomic_omit)
for atomic_omit_x in atomic_omit_list:
atomic_omit = atomic_omit_x.copy()
base_name_prep = atomic_omit_x.copy()
for mac in apm_imposed_machines:
iop_index = atomic_omit[mac]
                    if isinstance(iop_index, str) or iop_index is None:
del atomic_omit[mac]
else:
atomic_omit[mac] = (crash_point[mac], iop_index)
base_name_prep[mac] = (crash_point[mac], iop_index)
replayer.iops_omit_group(atomic_omit)
base_name = replay_dir_base_name_ARO(crash_point, base_name_prep)
atomicity_reordering_count += 1
if replay:
(base_path, dirnames,stdout_files) = get_replay_dirs(machines, base_name)
replayer.construct_crashed_dirs(dirnames, stdout_files)
MultiThreadedChecker.check_later(base_path, dirnames, stdout_files[machines[-1]], base_name)
replayer.iops_include_group(atomic_omit)
if replay:
MultiThreadedChecker.wait_and_get_outputs()
replay_end = time.time()
print 'Atomicity reordering correlated states : ' + str(atomicity_reordering_count)
print 'Atomicity reordering correlated replay took approx ' + str(replay_end-replay_start) + ' seconds...'
def check_corr_crash_vuls(pace_configs, sock_config, threads = 1, replay = False):
print 'Parsing traces to determine logical operations ...'
#initialize the replayer
replayer = DSReplayer(pace_configs, sock_config)
#set the environment - what file system (defaultfs)? what network(defaultnet)?
replayer.set_environment(defaultfs('count', 1), defaultnet(), load_cross_deps = True)
#did we parse and understand? if yes, print.
replayer.print_ops(show_io_ops = True)
print 'Successfully parsed logical operations!'
if replay == False:
return
assert threads > 0
for i in range(0, threads):
t = MultiThreadedChecker(MultiThreadedChecker.queue, i)
t.setDaemon(True)
t.start()
(reachable_prefix_fsync_deps, reachable_prefix_no_deps) = compute_reachable_global_prefixes(replayer)
grps_0_1_no_deps = unique_grp(reachable_prefix_no_deps, replayer.conceptual_machines(), [0,1])
grps_0_1_fsync_deps = unique_grp(reachable_prefix_fsync_deps, replayer.conceptual_machines(), [0,1])
MultiThreadedChecker.reset()
replay_correlated_global_prefix(replayer, grps_0_1_no_deps, True)
MultiThreadedChecker.reset()
replay_correlated_reordering(replayer, grps_0_1_fsync_deps, replayer.client_index, True)
MultiThreadedChecker.reset()
replay_correlated_atomicity_reordering(replayer, grps_0_1_no_deps, replayer.client_index, True)
MultiThreadedChecker.reset()
replay_correlated_atomicity_prefix(replayer, grps_0_1_no_deps, replayer.client_index, True)
uppath = lambda _path, n: os.sep.join(_path.split(os.sep)[:-n])
os.system('cp ' + os.path.join(uppath(paceconfig(0).cached_prefix_states_file, 1), 'micro_ops') + ' ' + paceconfig(0).scratchpad_dir) | mit |
canaltinova/servo | tests/wpt/web-platform-tests/tools/wptrunner/wptrunner/browsers/edge.py | 7 | 2706 | from .base import Browser, ExecutorBrowser, require_arg
from ..webdriver_server import EdgeDriverServer
from ..executors import executor_kwargs as base_executor_kwargs
from ..executors.executorselenium import (SeleniumTestharnessExecutor,
SeleniumRefTestExecutor)
from ..executors.executoredge import EdgeDriverWdspecExecutor
__wptrunner__ = {"product": "edge",
"check_args": "check_args",
"browser": "EdgeBrowser",
"executor": {"testharness": "SeleniumTestharnessExecutor",
"reftest": "SeleniumRefTestExecutor",
"wdspec": "EdgeDriverWdspecExecutor"},
"browser_kwargs": "browser_kwargs",
"executor_kwargs": "executor_kwargs",
"env_extras": "env_extras",
"env_options": "env_options"}
def check_args(**kwargs):
require_arg(kwargs, "webdriver_binary")
def browser_kwargs(test_type, run_info_data, **kwargs):
return {"webdriver_binary": kwargs["webdriver_binary"],
"webdriver_args": kwargs.get("webdriver_args")}
def executor_kwargs(test_type, server_config, cache_manager, run_info_data,
**kwargs):
from selenium.webdriver import DesiredCapabilities
executor_kwargs = base_executor_kwargs(test_type, server_config,
cache_manager, **kwargs)
executor_kwargs["close_after_done"] = True
executor_kwargs["capabilities"] = dict(DesiredCapabilities.EDGE.items())
return executor_kwargs
def env_extras(**kwargs):
return []
def env_options():
return {"supports_debugger": False}
class EdgeBrowser(Browser):
used_ports = set()
def __init__(self, logger, webdriver_binary, webdriver_args=None):
Browser.__init__(self, logger)
self.server = EdgeDriverServer(self.logger,
binary=webdriver_binary,
args=webdriver_args)
self.webdriver_host = "localhost"
self.webdriver_port = self.server.port
def start(self, **kwargs):
print self.server.url
self.server.start()
def stop(self, force=False):
self.server.stop(force=force)
def pid(self):
return self.server.pid
def is_alive(self):
# TODO(ato): This only indicates the server is alive,
# and doesn't say anything about whether a browser session
# is active.
return self.server.is_alive()
def cleanup(self):
self.stop()
def executor_browser(self):
return ExecutorBrowser, {"webdriver_url": self.server.url}
| mpl-2.0 |
huangkuan/hack | lib/requests/packages/charade/sjisprober.py | 167 | 3825 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(SJISSMModel)
self._mDistributionAnalyzer = SJISDistributionAnalysis()
self._mContextAnalyzer = SJISContextAnalysis()
self.reset()
def reset(self):
MultiByteCharSetProber.reset(self)
self._mContextAnalyzer.reset()
def get_charset_name(self):
return "SHIFT_JIS"
def feed(self, aBuf):
aLen = len(aBuf)
for i in range(0, aLen):
codingState = self._mCodingSM.next_state(aBuf[i])
if codingState == constants.eError:
if constants._debug:
sys.stderr.write(self.get_charset_name()
+ ' prober hit error at byte ' + str(i)
+ '\n')
self._mState = constants.eNotMe
break
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
break
elif codingState == constants.eStart:
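                # eStart means the state machine has just consumed a complete
                # character: hand it to the context and distribution
                # analyzers (self._mLastChar carries over the first byte of
                # a character split across feed() calls).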
charLen = self._mCodingSM.get_current_charlen()
if i == 0:
self._mLastChar[1] = aBuf[0]
self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
charLen)
self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
else:
self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
- charLen], charLen)
self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
charLen)
self._mLastChar[0] = aBuf[aLen - 1]
if self.get_state() == constants.eDetecting:
if (self._mContextAnalyzer.got_enough_data() and
(self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
self._mState = constants.eFoundIt
return self.get_state()
def get_confidence(self):
contxtCf = self._mContextAnalyzer.get_confidence()
distribCf = self._mDistributionAnalyzer.get_confidence()
return max(contxtCf, distribCf)
| apache-2.0 |
jhayworth/config | .emacs.d/elpy/rpc-venv/local/lib/python2.7/site-packages/setuptools/extern/__init__.py | 10 | 2514 | import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
Search first the vendor package then as a natural package.
"""
yield self.vendor_pkg + '.'
yield ''
def find_module(self, fullname, path=None):
"""
Return self when fullname starts with root_name and the
target module is one vendored through this importer.
"""
root, base, target = fullname.partition(self.root_name + '.')
if root:
return
if not any(map(target.startswith, self.vendored_names)):
return
return self
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
# mysterious hack:
# Remove the reference to the extant package/module
# on later Python versions to cause relative imports
# in the vendor package to resolve the same modules
# as those going through this importer.
if sys.version_info >= (3, ):
del sys.modules[extant]
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
names = 'six', 'packaging', 'pyparsing', 'ordered_set',
VendorImporter(__name__, names, 'setuptools._vendor').install()
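# Resolution order example (illustrative): once install() has run, an import
# like `import setuptools.extern.six` first tries the vendored copy at
# `setuptools._vendor.six`, then falls back to a top-level `six`.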
| gpl-3.0 |
py-amigos/adengine | tests/views/test_user.py | 1 | 2439 | import json
from adengine.model import User, Ad
NOT_FOUND_ERROR = {
"error": "Not found"
}
def build_api_url(id_=None):
if id_ is not None:
return "/api/users/{}".format(id_)
return "/api/users"
def _new_ad(user, text="ad-text"):
ad = Ad(text=text, author_id=user.id)
return ad
def _new_user(name='Peter'):
user = User(email='{name}@example.com'.format(name=name),
name=name,
username=name,
password_hash='12346')
return user
def _add_user(session, user):
return _add_resource(session, user)
def _add_resource(session, resource):
session.add(resource)
session.commit()
return resource
def test_user_added(client):
"""
User should be added to the database and ID generated.
"""
user = _new_user()
result = client.post(
build_api_url(),
data=json.dumps(user.as_dict()),
content_type='application/json'
)
assert 201 == result.status_code
def test_get_all_users(session, client):
"""
Should return all added users.
"""
# given
user1 = _new_user(name='Eugene')
user2 = _new_user(name='Vova')
_add_user(session, user1)
_add_user(session, user2)
# execute
all_users = json.loads(client.get(build_api_url()).data)
# verify
assert 2 == len(all_users.get("objects"))
def test_delete_non_existing_user(session, app):
"""
Should fail in attempt to delete non-existing user.
"""
# given
user_id = -1
client = app.test_client()
# exercise
query = '/api/v1.0/users/{user_id}'.format(user_id=user_id)
result = client.delete(query)
# verify
    assert result.status_code == 404
def test_user_deleted(session, client):
"""Should delete user using View class for users."""
# given
user = _new_user(name='to-delete')
_add_user(session, user)
# exercise
result = client.delete(build_api_url(user.id))
# verify
assert result.status_code == 204
    assert User.query.filter_by(id=user.id).first() is None
def test_get_user_by_id(session, app):
"""
Should return user by its identifier.
"""
# given
user = _new_user(name='Ivan')
_add_user(session, user)
client = app.test_client()
query = build_api_url(user.id)
# exercise
user_in_db = json.loads(client.get(query).data)
# verify
assert user.id == user_in_db.get('id')
| artistic-2.0 |
tyll/fas | plugins/fas-plugin-bugzilla/fas_bugzilla/__init__.py | 11 | 5856 | # -*- coding: utf-8 -*-
import turbogears
from turbogears import controllers, expose, paginate, identity, redirect, widgets, validate, validators, error_handler
from turbogears.database import session
from fas.model import Session, People
import cherrypy
from genshi.template.plugin import TextTemplateEnginePlugin
import fas.sidebar as sidebar
from fas.auth import *
import logging
import fas.plugin as plugin
from fas.model import Configs
class BugzillaSave(validators.Schema):
bugzilla_email = validators.Email(strip=True, max=128)
def get_configs(configs_list):
configs = {}
for config in configs_list:
configs[config.attribute] = config.value
return configs
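# e.g. a Configs row with attribute='bugzilla_email', value='user@host'
# becomes {'bugzilla_email': 'user@host'}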
class BugzillaPlugin(controllers.Controller):
capabilities = ['bugzilla_plugin']
def __init__(self):
'''Create a Bugzilla Controller.'''
self.path = ''
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas_bugzilla.templates.index")
def index(self):
username = turbogears.identity.current.user_name
person = People.by_username(username)
        if turbogears.identity.current.user_name == username:
            personal = True
        else:
            personal = False
        user = People.by_username(turbogears.identity.current.user_name)
        admin = is_admin(user)
configs = get_configs(Configs.query.filter_by(person_id=person.id, application='bugzilla').all())
if 'bugzilla_email' in configs:
is_set = True
else:
is_set = False
return dict(admin=admin, person=person, personal=personal, is_set=is_set, configs=configs)
@classmethod
def initPlugin(cls, controller):
cls.log = logging.getLogger('plugin.bugzilla')
cls.log.info('Bugzilla plugin initializing')
try:
path, self = controller.requestpath(cls, '/bugzilla')
cls.log.info('Bugzilla plugin hooked')
self.path = path
if self.sidebarentries not in sidebar.entryfuncs:
sidebar.entryfuncs.append(self.sidebarentries)
except (plugin.BadPathException,
plugin.PathUnavailableException), e:
cls.log.info('Bugzilla plugin hook failure: %s' % e)
def delPlugin(self, controller):
self.log.info('Bugzilla plugin shutting down')
if self.sidebarentries in sidebar.entryfuncs:
sidebar.entryfuncs.remove(self.sidebarentries)
def sidebarentries(self):
return [('Bugzilla plugin', self.path)]
@identity.require(turbogears.identity.not_anonymous())
@expose(template="fas_bugzilla.templates.edit")
def edit(self, targetname=None):
username = turbogears.identity.current.user_name
person = People.by_username(username)
target = People.by_username(targetname)
admin = is_admin(person)
configs = get_configs(Configs.query.filter_by(person_id=person.id, application='bugzilla').all())
if 'bugzilla_email' in configs:
email = configs['bugzilla_email']
else:
email = target.email
return dict(admin=admin, person=person, email=email, target=target)
@expose(template="fas.templates.help")
def help(self, id='none'):
help = { 'none' : [_('Error'), _('<p>We could not find that help item</p>')],
'bugzilla_change' : [_('Bugzilla'), _('<p>Bugzilla has a seperate authentication system. If you wish to have a different bugzilla address from your FAS address, please set that up here. Not doing so will cause permission issues.</p>')]
}
try:
helpItem = help[id]
except KeyError:
return dict(title=_('Error'), helpItem=[_('Error'), _('<p>We could not find that help item</p>')])
return dict(help=helpItem)
@expose(template="fas.templates.error")
def error(self, tg_errors=None):
'''Show a friendly error message'''
if not tg_errors:
turbogears.redirect('/')
return dict(tg_errors=tg_errors)
@identity.require(turbogears.identity.not_anonymous())
@validate(validators=BugzillaSave())
@error_handler(error)
@expose(template='fas_bugzilla.templates.edit')
def save(self, targetname, bugzilla_email):
person = People.by_username(turbogears.identity.current.user_name)
target = People.by_username(targetname)
if not can_edit_user(person, target):
turbogears.flash(_("You do not have permission to edit '%s'") % target.username)
turbogears.redirect('/bugzilla')
return dict()
new_configs = {'bugzilla_email': bugzilla_email}
cur_configs = Configs.query.filter_by(person_id=target.id, application='bugzilla').all()
        if bugzilla_email is None:
            if cur_configs:
                session.delete(cur_configs[0])
turbogears.flash(_("Bugzilla specific email removed! This means your bugzilla email must be set to: %s" % target.email))
turbogears.redirect('/bugzilla/')
for config in cur_configs:
for new_config in new_configs.keys():
if config.attribute == new_config:
config.value = new_configs[new_config]
del(new_configs[new_config])
for config in new_configs:
c = Configs(application='bugzilla', attribute=config, value=new_configs[config])
target.configs.append(c)
turbogears.flash(_("Changes saved. Please allow up to 1 hour for changes to be realized."))
turbogears.redirect('/bugzilla/')
return dict() | gpl-2.0 |
farvardin/txt2tags-test | test/options/run.py | 4 | 24250 | #
# txt2tags command line options tester (http://txt2tags.org)
# See also: ../run.py ../lib.py
#
# Note: The .t2t files are generated dynamicaly, based on 'tests' dict data
#
import sys, os
sys.path.insert(0, '..')
import lib
del sys.path[0]
# sux
lib.OK = lib.FAILED = 0
lib.ERROR_FILES = []
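# (the assignments above reset lib's module-level pass/fail counters and the
# error-file list so this suite starts from a clean slate)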
# text patterns to compose source files
EMPTY_HEADER = "\n"
FULL_HEADER = "Header 1\nHeader 2\nHeader 3\n"
SIMPLE_BODY = "Text.\n"
TITLED_BODY = "= Title 1 =\nText.\n== Title 2 ==\nText.\n"
EMAIL = 'user@domain.com\n'
CONFIG_FILE_TXT = '%!target: html\n'
CSS_FILE_TXT = 'p { color: blue; }\n'
IMAGEFILE = '[../../samples/img/t2tpowered.png]'
# a nice postproc to rip off version information from output
VERSION_GOTCHA = "%!postproc: '(generated by txt2tags) [^ ]+' '\\1'\n"
# Sed commands to remove the SVN revision number from version
SED_NO_REVISION1 = r's/\(version [0-9]\.[0-9]\)\.[0-9][0-9]* </\1 </' # -V
SED_NO_REVISION2 = r's/\(-- Txt2tags [0-9]\.[0-9]\)\.[0-9][0-9]* /\1 /' # -vv
# the registered tests
tests = [
{
'name' : 'arguments-missing', # t2t
'content': '',
'cmdline': [""],
'redir' : ["> arguments-missing.out"],
'extra' : ['notarget', 'noinfile'],
}, {
'name' : 'infile-missing', # t2t -t html
'content': '',
'cmdline': ["-t html"],
'redir' : ["> infile-missing.out"],
'extra' : ['notarget', 'noinfile'],
}, {
'name' : 'infile-empty', # infile is empty
'content': '',
'cmdline': ['-t html'],
'redir' : ["> infile-empty.out"],
'extra' : ['notarget'],
}, {
'name' : 'infile-not-found-1', # infile -t html ERROR.t2t
'content': '',
'cmdline': ['-t html ERROR.t2t'],
'redir' : ["> infile-not-found-1.out"],
'extra' : ['notarget', 'noinfile'],
}, {
'name' : 'infile-not-found-2', # infile -t html -i ERROR.t2t
'content': '',
'cmdline': ['-t html -i ERROR.t2t'],
'redir' : ["> infile-not-found-2.out"],
'extra' : ['notarget', 'noinfile'],
}, {
'name' : 'target-invalid', # t2t --target ERROR
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--target ERROR"],
'redir' : ["> target-invalid.out"],
'extra' : ['notarget'],
}, {
'name' : 't-invalid', # t2t --t ERROR
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-t ERROR"],
'redir' : ["> t-invalid.out"],
'extra' : ['notarget'],
}, {
'name' : 'target-missing', # t2t infile.t2t
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': [""],
'redir' : ["> target-missing.out"],
'extra' : ['notarget'],
}, {
'name' : 'invalid-short-1', # t2t -z
'content': '',
'cmdline': ["-z"],
'redir' : ["> invalid-short-1.out"],
'extra' : ['notarget', 'noinfile'],
}, {
'name' : 'invalid-long-1', # t2t --zzzz
'content': '',
'cmdline': ["--zzzz"],
'redir' : ["> invalid-long-1.out"],
'extra' : ['notarget', 'noinfile'],
}, {
'name' : 'invalid-short-2', # t2t -z infile.t2t
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-z"],
'redir' : ["> invalid-short-2.out"],
'extra' : ['notarget'],
}, {
'name' : 'invalid-long-2', # t2t --zzzz infile.t2t
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--zzzz"],
'redir' : ["> invalid-long-2.out"],
'extra' : ['notarget'],
}, {
'name' : 'version', # t2t --version
'content': '',
'cmdline': ['--version'],
'redir' : ["> version.out"],
'extra' : ['notarget', 'noinfile', 'norevision'],
}, {
'name' : 'V', # t2t -V
'content': '',
'cmdline': ['-V'],
'redir' : ["> V.out"],
'extra' : ['notarget', 'noinfile', 'norevision'],
}, {
'name' : 'help', # t2t --help
'content': '',
'cmdline': ['--help'],
'redir' : ["> help.out"],
'extra' : ['notarget', 'noinfile', 'norevision'],
}, {
'name' : 'h', # t2t -h
'content': '',
'cmdline': ['-h'],
'redir' : ["> h.out"],
'extra' : ['notarget', 'noinfile', 'norevision'],
}, {
'name' : 'verbose-1', # t2t --verbose infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet --verbose -o-"],
'redir' : ["> verbose-1.txt"],
'extra' : ['norevision'],
}, {
'name' : 'verbose-2', # t2t --verbose*2 infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet --verbose --verbose -o-"],
'redir' : ["> verbose-2.txt"],
'extra' : ['norevision'],
}, {
'name' : 'verbose-3', # t2t --verbose*3 infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet --verbose --verbose --verbose -o-"],
'redir' : ["> verbose-3.txt"],
'extra' : ['norevision'],
}, {
'name' : 'verbose-4', # t2t --verbose*4 infile.t2t (same -vvv)
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet --verbose --verbose --verbose --verbose -o-"],
'redir' : ["> verbose-4.txt"],
'extra' : ['norevision'],
}, {
'name' : 'verbose-5', # t2t --verbose*5 infile.t2t (same -vvv)
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet --verbose --verbose --verbose --verbose --verbose -o-"],
'redir' : ["> verbose-5.txt"],
'extra' : ['norevision'],
}, {
'name' : 'v-1', # t2t -v infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -v -o-"],
'redir' : ["> v-1.txt"],
'extra' : ['norevision'],
}, {
'name' : 'v-2', # t2t -v -v infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -v -v -o-"],
'redir' : ["> v-2.txt"],
'extra' : ['norevision'],
}, {
'name' : 'v-3', # t2t -v -v -v infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -v -v -v -o-"],
'redir' : ["> v-3.txt"],
'extra' : ['norevision'],
}, {
'name' : 'v-4', # t2t -v -v -v -v infile.t2t (same -vvv)
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -v -v -v -v -o-"],
'redir' : ["> v-4.txt"],
'extra' : ['norevision'],
}, {
'name' : 'v-5', # t2t -v -v -v -v -v infile.t2t (same -vvv)
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -v -v -v -v -v -o-"],
'redir' : ["> v-5.txt"],
'extra' : ['norevision'],
}, {
'name' : 'vv', # t2t -vv infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -vv -o-"],
'redir' : ["> vv.txt"],
'extra' : ['norevision'],
}, {
'name' : 'vvv', # t2t -vvv infile.t2t
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -vvv -o-"],
'redir' : ["> vvv.txt"],
'extra' : ['norevision'],
}, {
'name' : 'vvvv', # t2t -vvvv infile.t2t (same -vvv)
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -vvvv -o-"],
'redir' : ["> vvvv.txt"],
'extra' : ['norevision'],
}, {
'name' : 'vvvvv', # t2t -vvvvv infile.t2t (same -vvv)
'target' : 'txt',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -vvvvv -o-"],
'redir' : ["> vvvvv.txt"],
'extra' : ['norevision'],
}, {
'name' : 'q-verbose', # t2t -q -vvv infile.t2t
'target' : 'txt',
'content': FULL_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet -q -vvv -o-"],
'redir' : ["> q-verbose.txt"],
}, {
'name' : 'quiet-verbose', # t2t --quiet -vvv infile.t2t
'target' : 'txt',
'content': FULL_HEADER+SIMPLE_BODY,
'cmdline': ["--no-quiet --quiet -vvv -o-"],
'redir' : ["> quiet-verbose.txt"],
}, {
'name' : 'target', # t2t --target html infile.t2t
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --target html"],
'extra' : ['notarget'],
}, {
'name' : 't', # t2t -t html infile.t2t
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H -t html"],
'extra' : ['notarget'],
}, {
'name' : 'infile',
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --infile"],
}, {
'name' : 'no-infile-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --no-infile"],
}, {
'name' : 'no-infile-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --infile fake --no-infile"],
}, {
'name' : 'no-infile-3', # turning OFF multiple
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --infile fake1 --infile fake2 --no-infile"],
}, {
'name' : 'i',
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H -i"],
}, {
'name' : 'outfile-1', # same name as default
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --outfile outfile-1.html"],
}, {
'name' : 'outfile-2', # different name
'target' : 'foo',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H -t html --outfile outfile-2.foo"],
'extra' : ['notarget'],
}, {
'name' : 'no-outfile-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --no-outfile"],
}, {
'name' : 'no-outfile-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --outfile fake --no-outfile"],
}, {
'name' : 'o', # same name as default
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H -o o.html"],
}, {
'name' : 'enum-title-1',
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --enum-title"],
}, {
'name' : 'enum-title-2', # with --toc
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc --enum-title"],
}, {
'name' : 'enum-title-3', # no title to enumerate
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --enum-title"],
}, {
'name' : 'no-enum-title-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --no-enum-title"],
}, {
'name' : 'no-enum-title-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --enum-title --no-enum-title"],
}, {
'name' : 'n',
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H -n"],
}, {
'name' : 'toc-1',
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc"],
}, {
'name' : 'toc-2', # empty toc (no title)
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --toc"],
}, {
'name' : 'toc-3', # empty body
'target' : 'html',
'content': EMPTY_HEADER,
'cmdline': ["-H --toc"],
}, {
'name' : 'no-toc-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --no-toc"],
}, {
'name' : 'no-toc-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc --no-toc"],
}, {
'name' : 'toc-level-1',
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc --toc-level 1"],
}, {
'name' : 'toc-level-2', # very deep
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc --toc-level 999"],
}, {
'name' : 'toc-level-3', # useless (no --toc)
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc-level 1"],
}, {
'name' : 'toc-only-1',
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["--toc-only -o toc-only-1.html"],
}, {
'name' : 'toc-only-2', # empty toc (no title)
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--toc-only -o toc-only-2.html"],
}, {
'name' : 'toc-only-3', # no target, defaults to txt
'target' : 'out',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["--toc-only -o toc-only-3.out"],
'extra' : ['notarget'],
}, {
'name' : 'toc-only-4', # with --toc-level
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["--toc-only --toc-level 1 -o toc-only-4.html"],
}, {
'name' : 'toc-only-5', # with --enum-title
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["--toc-only --enum-title -o toc-only-5.html"],
}, {
'name' : 'no-toc-only-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --no-toc-only"],
}, {
'name' : 'no-toc-only-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+TITLED_BODY,
'cmdline': ["-H --toc-only --no-toc-only"],
}, {
'name' : 'mask-email',
'target' : 'html',
'content': EMPTY_HEADER+EMAIL,
'cmdline': ["-H --mask-email"],
}, {
'name' : 'no-mask-email-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+EMAIL,
'cmdline': ["-H --no-mask-email"],
}, {
'name' : 'no-mask-email-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+EMAIL,
'cmdline': ["-H --mask-email --no-mask-email"],
}, {
'name' : 'headers-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--headers"],
}, {
'name' : 'headers-2', # turning OFF --no-headers
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--no-headers --headers"],
}, {
'name' : 'no-headers',
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--no-headers"],
}, {
'name' : 'H',
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H"],
}, {
'name' : 'encoding-1',
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--encoding iso-8859-1"],
}, {
'name' : 'encoding-2', # normalization
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--encoding ISO88591"],
}, {
'name' : 'encoding-3', # customized
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--encoding fake-999"],
}, {
'name' : 'encoding-4', # LaTeX translation
'target' : 'tex',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--encoding iso-8859-1"],
}, {
'name' : 'no-encoding-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--no-encoding"],
}, {
'name' : 'no-encoding-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--encoding iso-8859-1 --no-encoding"],
}, {
'name' : 'style-1',
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--style", lib.CSS_FILE],
}, {
'name' : 'style-2', # multiple declaration
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--style other.css --style", lib.CSS_FILE],
}, {
'name' : 'style-3', # LaTeX package
'target' : 'tex',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--style mypackage"],
}, {
'name' : 'style-4', # LaTeX multiple package
'target' : 'tex',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--style mypackage,otherpackage,another"],
}, {
'name' : 'style-5', # LaTeX module no .sty
'target' : 'tex',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--style foo.sty --style bar.STY --style baz"],
}, {
'name' : 'no-style-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--no-style"],
}, {
'name' : 'no-style-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--style fake.css --no-style"],
}, {
'name' : 'css-sugar-1', # just body
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --css-sugar"],
}, {
'name' : 'css-sugar-2', # empty toc & body
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --toc --css-sugar"],
}, {
'name' : 'css-sugar-3', # headers, toc & body
'target' : 'html',
'content': FULL_HEADER+VERSION_GOTCHA+TITLED_BODY,
'cmdline': ["--toc --css-sugar"],
}, {
'name' : 'no-css-sugar-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --no-css-sugar"],
}, {
'name' : 'no-css-sugar-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["-H --css-sugar --no-css-sugar"],
}, {
'name' : 'css-inside-1',
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--css-inside --style", lib.CSS_FILE],
'extra' : ['css'],
}, {
'name' : 'css-inside-2', # with --css-sugar
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--css-sugar --css-inside --style", lib.CSS_FILE],
'extra' : ['css'],
}, {
'name' : 'css-inside-3', # missing CSS file
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'redir' : ["> css-inside-3.out"],
'cmdline': ["-t html --css-inside --style", lib.CSS_FILE],
'extra' : ['notarget'],
}, {
'name' : 'css-inside-4', # no --style
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--css-sugar --css-inside"],
}, {
'name' : 'css-inside-5', # two CSS files
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--css-inside --style "+ lib.CSS_FILE +" --style "+ lib.CSS_FILE],
'extra' : ['css'],
}, {
'name' : 'css-inside-6', # two CSS files, one missing
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'redir' : ["> css-inside-6.out"],
'cmdline': ["-t html --css-inside --style missing.css --style "+ lib.CSS_FILE],
'extra' : ['css', 'notarget'],
}, {
'name' : 'no-css-inside-1', # useless
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--no-css-inside --style", lib.CSS_FILE],
'extra' : ['css'],
}, {
'name' : 'no-css-inside-2', # turning OFF
'target' : 'html',
'content': EMPTY_HEADER+VERSION_GOTCHA+SIMPLE_BODY,
'cmdline': ["--css-inside --no-css-inside --style", lib.CSS_FILE],
'extra' : ['css'],
### Now fully tested in test/includeconf
# }, {
# 'name' : 'config-file',
# 'target' : 'html',
# 'content': EMPTY_HEADER+SIMPLE_BODY,
# 'cmdline': ["-H --config-file", lib.CONFIG_FILE],
# 'extra' : ['config', 'notarget'],
# }, {
# 'name' : 'C',
# 'target' : 'html',
# 'content': EMPTY_HEADER+SIMPLE_BODY,
# 'cmdline': ["-H -C", lib.CONFIG_FILE],
# 'extra' : ['config', 'notarget'],
}, {
'name' : 'dump-config',
'content': EMPTY_HEADER+CONFIG_FILE_TXT+SIMPLE_BODY,
'cmdline': ["--dump-config"],
'redir' : ["> dump-config.out"],
'extra' : ['notarget'],
}, {
'name' : 'no-dump-config',
'content': EMPTY_HEADER+CONFIG_FILE_TXT+SIMPLE_BODY,
'cmdline': ["-H -o- --dump-config --no-dump-config"],
'redir' : ["> no-dump-config.out"],
'extra' : ['notarget'],
}, {
'name' : 'dump-source',
'content': FULL_HEADER+CONFIG_FILE_TXT+SIMPLE_BODY,
'cmdline': ["--dump-source"],
'redir' : ["> dump-source.out"],
'extra' : ['notarget'],
}, {
'name' : 'no-dump-source',
'content': EMPTY_HEADER+CONFIG_FILE_TXT+SIMPLE_BODY,
'cmdline': ["-H -o- --dump-source --no-dump-source"],
'redir' : ["> no-dump-source.out"],
'extra' : ['notarget'],
}, {
'name' : 'targets',
'content': EMPTY_HEADER+SIMPLE_BODY,
'cmdline': ["--targets"],
'redir' : ["> targets.out"],
'extra' : ['notarget'],
}, {
'name' : 'no-targets',
'content': EMPTY_HEADER+CONFIG_FILE_TXT+SIMPLE_BODY,
'cmdline': ["-H -o- --targets --no-targets"],
'redir' : ["> no-targets.out"],
'extra' : ['notarget'],
}, {
'name' : 'image-embed-rtf',
'content': EMPTY_HEADER+IMAGEFILE,
'target' : 'rtf',
'cmdline': ["--embed-images"],
}
]
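# Each entry above follows the same schema; an illustrative (hypothetical)
# example, using only keys and 'extra' flags that already appear in the list:
#   {
#       'name'   : 'my-test',                 # base name for infile/outfile
#       'target' : 'html',                    # output target (omit with 'notarget')
#       'content': EMPTY_HEADER+SIMPLE_BODY,  # source document to convert
#       'cmdline': ["-H"],                    # command line options
#       'redir'  : ["> my-test.out"],         # optional output redirection
#       'extra'  : ['norevision'],            # optional flags: config, css,
#   }                                         #   notarget, noinfile, norevision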
def run():
for test in tests:
infile = test['name'] + '.t2t'
outfile = test['name'] + '.' + (test.get('target') or 'out')
extra = test.get('extra') or []
cmdline = test['cmdline']
        if 'noinfile' not in extra:
cmdline = test['cmdline'] + [infile]
if lib.initTest(test['name'], infile, outfile):
# create the extra files (if needed for this test)
if 'config' in extra:
lib.WriteFile(lib.CONFIG_FILE, CONFIG_FILE_TXT)
if 'css' in extra:
lib.WriteFile(lib.CSS_FILE, CSS_FILE_TXT)
            # add the -t target automatically, unless the test opts out
            if 'notarget' not in extra:
                cmdline = ['-t', test['target']] + cmdline
            # strip the SVN revision number when the test requests it
if 'norevision' in extra:
cmdline.append(' | sed "%s;%s"' % (
SED_NO_REVISION1,
SED_NO_REVISION2)
)
            # redirect the output to a file when the test requests it
if test.get('redir'):
cmdline.extend(test['redir'])
# always catch the error output
cmdline.append('2>&1')
# create the source file
lib.WriteFile(infile, test['content'])
# convert and check results
lib.convert(cmdline)
lib.diff(outfile)
lib.convert(cmdline, True)
lib.diff(outfile)
# remove the trash
os.remove(infile)
if os.path.isfile(lib.CSS_FILE):
os.remove(lib.CSS_FILE)
if os.path.isfile(lib.CONFIG_FILE):
os.remove(lib.CONFIG_FILE)
return lib.OK, lib.FAILED, lib.ERROR_FILES
if __name__ == '__main__':
print lib.MSG_RUN_ALONE
| gpl-2.0 |
leedm777/ansible | lib/ansible/plugins/shell/sh.py | 34 | 6175 | # (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import pipes
import ansible.constants as C
import time
import random
_USER_HOME_PATH_RE = re.compile(r'^~[_.A-Za-z0-9][-_.A-Za-z0-9]*$')
class ShellModule(object):
# How to end lines in a python script one-liner
_SHELL_EMBEDDED_PY_EOL = '\n'
def env_prefix(self, **kwargs):
'''Build command prefix with environment variables.'''
env = dict(
LANG = C.DEFAULT_MODULE_LANG,
LC_CTYPE = C.DEFAULT_MODULE_LANG,
LC_MESSAGES = C.DEFAULT_MODULE_LANG,
)
env.update(kwargs)
return ' '.join(['%s=%s' % (k, pipes.quote(unicode(v))) for k,v in env.items()])
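    # Illustrative output of env_prefix (assuming DEFAULT_MODULE_LANG is
    # 'en_US.UTF-8'; Python 2 dict ordering is not guaranteed, so the
    # variables may appear in any order):
    #   env_prefix(FOO='a b') ->
    #     "LANG=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 LC_MESSAGES=en_US.UTF-8 FOO='a b'"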
def join_path(self, *args):
return os.path.join(*args)
def path_has_trailing_slash(self, path):
return path.endswith('/')
def chmod(self, mode, path):
path = pipes.quote(path)
return 'chmod %s %s' % (mode, path)
def remove(self, path, recurse=False):
path = pipes.quote(path)
if recurse:
return "rm -rf %s >/dev/null 2>&1" % path
else:
return "rm -f %s >/dev/null 2>&1" % path
def mkdtemp(self, basefile=None, system=False, mode=None):
if not basefile:
basefile = 'ansible-tmp-%s-%s' % (time.time(), random.randint(0, 2**48))
basetmp = self.join_path(C.DEFAULT_REMOTE_TMP, basefile)
if system and (basetmp.startswith('$HOME') or basetmp.startswith('~/')):
basetmp = self.join_path('/tmp', basefile)
cmd = 'mkdir -p "%s"' % basetmp
if mode:
cmd += ' && chmod %s "%s"' % (mode, basetmp)
cmd += ' && echo "%s"' % basetmp
return cmd
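    # Illustrative command built by mkdtemp(mode='0700'), assuming
    # DEFAULT_REMOTE_TMP is '$HOME/.ansible/tmp' and with <time>-<rand>
    # standing in for the generated suffix:
    #   mkdir -p "$HOME/.ansible/tmp/ansible-tmp-<time>-<rand>" &&
    #   chmod 0700 "..." && echo "..."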
def expand_user(self, user_home_path):
''' Return a command to expand tildes in a path
It can be either "~" or "~username". We use the POSIX definition of
a username:
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_426
http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap03.html#tag_03_276
'''
# Check that the user_path to expand is safe
if user_home_path != '~':
if not _USER_HOME_PATH_RE.match(user_home_path):
# pipes.quote will make the shell return the string verbatim
user_home_path = pipes.quote(user_home_path)
return 'echo %s' % user_home_path
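    # Illustrative behaviour (a POSIX shell is assumed on the remote side):
    #   expand_user('~')         -> "echo ~"           (shell expands it)
    #   expand_user('~alice')    -> "echo ~alice"      (shell expands the user)
    #   expand_user('~bad name') -> "echo '~bad name'" (quoted, echoed verbatim)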
def checksum(self, path, python_interp):
# The following test needs to be SH-compliant. BASH-isms will
# not work if /bin/sh points to a non-BASH shell.
#
# In the following test, each condition is a check and logical
# comparison (|| or &&) that sets the rc value. Every check is run so
# the last check in the series to fail will be the rc that is
# returned.
#
# If a check fails we error before invoking the hash functions because
# hash functions may successfully take the hash of a directory on BSDs
# (UFS filesystem?) which is not what the rest of the ansible code
# expects
#
# If all of the available hashing methods fail we fail with an rc of
# 0. This logic is added to the end of the cmd at the bottom of this
# function.
# Return codes:
# checksum: success!
# 0: Unknown error
# 1: Remote file does not exist
# 2: No read permissions on the file
# 3: File is a directory
# 4: No python interpreter
# Quoting gets complex here. We're writing a python string that's
# used by a variety of shells on the remote host to invoke a python
# "one-liner".
shell_escaped_path = pipes.quote(path)
test = "rc=flag; [ -r %(p)s ] || rc=2; [ -f %(p)s ] || rc=1; [ -d %(p)s ] && rc=3; %(i)s -V 2>/dev/null || rc=4; [ x\"$rc\" != \"xflag\" ] && echo \"${rc} \"%(p)s && exit 0" % dict(p=shell_escaped_path, i=python_interp)
csums = [
"({0} -c 'import hashlib; BLOCKSIZE = 65536; hasher = hashlib.sha1();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python > 2.4 (including python3)
"({0} -c 'import sha; BLOCKSIZE = 65536; hasher = sha.sha();{2}afile = open(\"'{1}'\", \"rb\"){2}buf = afile.read(BLOCKSIZE){2}while len(buf) > 0:{2}\thasher.update(buf){2}\tbuf = afile.read(BLOCKSIZE){2}afile.close(){2}print(hasher.hexdigest())' 2>/dev/null)".format(python_interp, shell_escaped_path, self._SHELL_EMBEDDED_PY_EOL), # Python == 2.4
]
cmd = " || ".join(csums)
cmd = "%s; %s || (echo \'0 \'%s)" % (test, cmd, shell_escaped_path)
return cmd
def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
# don't quote the cmd if it's an empty string, because this will
# break pipelining mode
if cmd.strip() != '':
cmd = pipes.quote(cmd)
cmd_parts = [env_string.strip(), shebang.replace("#!", "").strip(), cmd]
new_cmd = " ".join(cmd_parts)
if rm_tmp:
new_cmd = '%s; rm -rf "%s" >/dev/null 2>&1' % (new_cmd, rm_tmp)
return new_cmd
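    # Illustrative composition (hypothetical paths and values):
    #   build_module_command('LANG=C', '#!/usr/bin/python', '/tmp/m.py',
    #                        rm_tmp='/tmp/t')
    #   -> 'LANG=C /usr/bin/python /tmp/m.py; rm -rf "/tmp/t" >/dev/null 2>&1'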
| gpl-3.0 |
tartley/extending_unittest | src/all_dirs_runner.py | 1 | 3508 | '''
A test runner that augments Django's standard one by finding subclasses of
unittest.TestCase no matter where they are located in the project, even in
directories which are not django apps. (The default test runner only looks in
particular modules within each django app.)
See also tests.utils.testrunner, which uses this.
'''
from inspect import getmembers, isclass
import os
from os.path import join, relpath, splitext
import sys
from unittest import TestCase, TestLoader, TestSuite
from django.test.simple import reorder_suite, DjangoTestSuiteRunner
from django.test.testcases import TestCase as DjangoTestCase
SKIP_TEST_CLASSES = set([
TestCase, DjangoTestCase,
])
def _get_module_names(root):
'''
Yield all the Python modules in the given root dir and its subdirs
'''
for subdir, dirs, fnames in os.walk(root):
        # prune hidden directories and 'talk' in place so os.walk skips them;
        # rebuilding via dirs[:] avoids mutating the list while iterating
        # over it, which would silently skip entries
        dirs[:] = [d for d in dirs if not d.startswith('.') and d != 'talk']
        for fname in fnames:
            if fname.endswith('.py'):
                yield relpath(join(subdir, fname))
def _to_importable_name(fname):
'''
Convert the filename of a module into the module name used to import it.
    e.g. 'ordering/tests/my_test.py' -> 'ordering.tests.my_test'
'''
fname, _ = splitext(fname)
modname = fname.replace('/', '.')
if modname.endswith('.__init__'):
modname = modname[:-9]
return modname
def _import(modname):
'''
Given a module name in 'ordering.blobs' format, imports and returns it
'''
__import__(modname)
return sys.modules[modname]
def _get_testcases(module):
'''
Yield all the TestCase subclasses defined in the given module.
'''
for name, value in getmembers(module):
if (
isclass(value) and
issubclass(value, TestCase) and
value not in SKIP_TEST_CLASSES
):
yield value
class AllDirsTestRunner(DjangoTestSuiteRunner):
def _test_matches(self, testname, command_line):
'''
Returns True if the named test should be included in the suite
'''
return (
not command_line or
any(arg in testname for arg in command_line)
)
def build_suite(self, test_labels, extra_tests=None, **kwargs):
'''
Override the base class method to return a suite consisting of all
        TestCase subclasses throughout the whole project.
'''
if test_labels:
suite = TestSuite()
else:
suite = DjangoTestSuiteRunner.build_suite(
self, test_labels, extra_tests, **kwargs
)
added_test_classes = set(t.__class__ for t in suite)
loader = TestLoader()
for fname in _get_module_names(os.getcwd()):
module = _import(_to_importable_name(fname))
for test_class in _get_testcases(module):
if test_class in added_test_classes:
continue
for method_name in loader.getTestCaseNames(test_class):
testname = '.'.join([
module.__name__, test_class.__name__, method_name
])
if self._test_matches(testname, test_labels):
suite.addTest(loader.loadTestsFromName(testname))
added_test_classes.add(test_class)
return reorder_suite(suite, (TestCase,))
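# Minimal sketch of wiring this runner into a Django settings module (the
# dotted path is hypothetical and depends on where this file sits on the
# import path):
#   TEST_RUNNER = 'all_dirs_runner.AllDirsTestRunner'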
| bsd-3-clause |
Healthcast/RSV | python/all_year_predict/methods.py | 2 | 3879 | #!/usr/bin/pyhton
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, neighbors, linear_model
from sklearn import svm
from sklearn import metrics
from sklearn.cross_validation import train_test_split
from sklearn.ensemble import RandomForestClassifier
def apply_algorithm(paras, X, y):
if paras['clf'] == 'svm':
clf = svm.SVC(kernel=paras['svm'][1], C=paras['svm'][0], probability=True)
elif paras['clf'] == 'knn':
clf = neighbors.KNeighborsClassifier(paras['knn'][0],\
weights=paras['knn'][1])
elif paras['clf'] == 'rf':
clf = RandomForestClassifier(max_depth=paras['rf'][0], \
n_estimators=paras['rf'][1],\
max_features=paras['rf'][2])
else:
        print "unknown classifier"
sys.exit(2)
return clf
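# Illustrative call (hypothetical parameter values; tuple order follows the
# indexing used above):
#   paras = {'clf': 'svm', 'svm': (1.0, 'rbf'),
#            'knn': (15, 'distance'), 'rf': (5, 10, 1)}
#   clf = apply_algorithm(paras, X, y)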
def apply_evaluation(paras, X, y, clf, data):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.3, \
random_state=0)
clf.fit(X_train, y_train)
r = clf.predict(X_test)
    # decision_function only exists for SVM; guard so knn/rf do not crash
    d = clf.decision_function(X) if hasattr(clf, 'decision_function') else None
p = clf.predict_proba(X).T[1]*3
h = data["hospital"].T[data["city"].index(paras["city"])]
h1 = h.astype(float)
m = max(h1)
    h1 = h1 / m * 4
plt.figure()
# plt.plot(d)
plt.plot(y)
plt.plot(h1)
plt.plot(p)
# height = 4
# bottom = -2
# ss = data["season_start"]
# date=data["date1"]
# c_id = data["city"].index(paras["city"])
# ylabel = data["ylabels"]
# for m in ss:
# plt.plot([m, m],[bottom, height], 'y--', linewidth=1)
#
# for m in range(1, len(ss)-1):
# a = ss[m]
# plt.text(a-5,height, date[a].split('-')[0])
#
# #plot the start week
# up=1
# for j in range(len(ylabel.T[c_id])-1):
# if ylabel.T[c_id,j] == 1 :
# plt.plot([j, j],[bottom, height], 'k-', linewidth=2)
# if up==1:
# plt.text(j-10, height-1, date[j])
# up=0
# else:
# plt.text(j-10, height-2, date[j])
# up=1
#
plt.show()
#plot the results
# x_min, x_max = X_train[:, 0].min() - 1, X_train[:, 0].max() + 1
# y_min, y_max = X_train[:, 1].min() - 1, X_train[:, 1].max() + 1
#
# xx, yy = np.meshgrid(np.arange(x_min, x_max, 1), np.arange(y_min, y_max, 1))
# Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Z = Z.reshape(xx.shape)
#
# plt.figure()
# plt.pcolormesh(xx, yy, Z)
# plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train)
# plt.xlim(xx.min(), xx.max())
# plt.ylim(yy.min(), yy.max())
# plt.title("binary classification classification")
# plt.show()
#
if paras['eva'] == 'accuracy':
print "The accuracy:"
print metrics.accuracy_score(y_test, r)
elif paras['eva'] == 'precision':
print "The precision:"
print metrics.precision_score(y_test, r)
elif paras['eva'] == 'recall':
print "The recall:"
print metrics.recall_score(y_test, r)
elif paras['eva'] == 'confusion':
print "The confusion matrix:"
print metrics.confusion_matrix(y_test, r)
elif paras['eva'] == 'report':
print "The report:"
print metrics.classification_report(y_test, r)
elif paras['eva'] == 'roc' and paras['clf'] == 'svm':
scores = clf.decision_function(X_test)
print "The auc:"
fpr, tpr, thresholds = metrics.roc_curve(y_test, scores)
roc_auc = metrics.auc(fpr, tpr)
print str(roc_auc)
plt.figure()
plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.show()
| gpl-2.0 |
diogocs1/comps | web/addons/base_import_module/tests/test_module/__openerp__.py | 377 | 1290 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Test Module',
'category': 'Website',
'summary': 'Custom',
'version': '1.0',
'description': """
Test
""",
'author': 'OpenERP SA',
'depends': ['website'],
'data': [
'test.xml',
],
'installable': True,
'application': True,
}
| apache-2.0 |
hrishioa/Navo | Raspi-Code/Lib/venv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py | 436 | 117347 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import with_metaclass
import types
from . import inputstream
from . import tokenizer
from . import treebuilders
from .treebuilders._base import Marker
from . import utils
from . import constants
from .constants import spaceCharacters, asciiUpper2Lower
from .constants import specialElements
from .constants import headingElements
from .constants import cdataElements, rcdataElements
from .constants import tokenTypes, ReparseException, namespaces
from .constants import htmlIntegrationPointElements, mathmlTextIntegrationPointElements
from .constants import adjustForeignAttributes as adjustForeignAttributesMap
from .constants import E
def parse(doc, treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
"""Parse a string or file-like object into a tree"""
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parse(doc, encoding=encoding)
def parseFragment(doc, container="div", treebuilder="etree", encoding=None,
namespaceHTMLElements=True):
tb = treebuilders.getTreeBuilder(treebuilder)
p = HTMLParser(tb, namespaceHTMLElements=namespaceHTMLElements)
return p.parseFragment(doc, container=container, encoding=encoding)
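# Illustrative usage (a minimal sketch; this copy is vendored under
# pip._vendor.html5lib, so the import path may differ from upstream html5lib):
#   doc = parse("<p>Hello</p>") # full document tree
#   frag = parseFragment("<b>Hi</b>", container="div") # fragment tree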
def method_decorator_metaclass(function):
class Decorated(type):
def __new__(meta, classname, bases, classDict):
for attributeName, attribute in classDict.items():
if isinstance(attribute, types.FunctionType):
attribute = function(attribute)
classDict[attributeName] = attribute
return type.__new__(meta, classname, bases, classDict)
return Decorated
class HTMLParser(object):
"""HTML parser. Generates a tree structure from a stream of (possibly
malformed) HTML"""
def __init__(self, tree=None, tokenizer=tokenizer.HTMLTokenizer,
strict=False, namespaceHTMLElements=True, debug=False):
"""
strict - raise an exception when a parse error is encountered
tree - a treebuilder class controlling the type of tree that will be
returned. Built in treebuilders can be accessed through
html5lib.treebuilders.getTreeBuilder(treeType)
tokenizer - a class that provides a stream of tokens to the treebuilder.
This may be replaced for e.g. a sanitizer which converts some tags to
text
"""
# Raise an exception on the first error encountered
self.strict = strict
if tree is None:
tree = treebuilders.getTreeBuilder("etree")
self.tree = tree(namespaceHTMLElements)
self.tokenizer_class = tokenizer
self.errors = []
self.phases = dict([(name, cls(self, self.tree)) for name, cls in
getPhases(debug).items()])
def _parse(self, stream, innerHTML=False, container="div",
encoding=None, parseMeta=True, useChardet=True, **kwargs):
self.innerHTMLMode = innerHTML
self.container = container
self.tokenizer = self.tokenizer_class(stream, encoding=encoding,
parseMeta=parseMeta,
useChardet=useChardet,
parser=self, **kwargs)
self.reset()
while True:
try:
self.mainLoop()
break
except ReparseException:
self.reset()
def reset(self):
self.tree.reset()
self.firstStartTag = False
self.errors = []
self.log = [] # only used with debug mode
# "quirks" / "limited quirks" / "no quirks"
self.compatMode = "no quirks"
if self.innerHTMLMode:
self.innerHTML = self.container.lower()
if self.innerHTML in cdataElements:
self.tokenizer.state = self.tokenizer.rcdataState
elif self.innerHTML in rcdataElements:
self.tokenizer.state = self.tokenizer.rawtextState
elif self.innerHTML == 'plaintext':
self.tokenizer.state = self.tokenizer.plaintextState
else:
# state already is data state
# self.tokenizer.state = self.tokenizer.dataState
pass
self.phase = self.phases["beforeHtml"]
self.phase.insertHtmlElement()
self.resetInsertionMode()
else:
self.innerHTML = False
self.phase = self.phases["initial"]
self.lastPhase = None
self.beforeRCDataPhase = None
self.framesetOK = True
@property
def documentEncoding(self):
"""The name of the character encoding
that was used to decode the input stream,
or :obj:`None` if that is not determined yet.
"""
if not hasattr(self, 'tokenizer'):
return None
return self.tokenizer.stream.charEncoding[0]
def isHTMLIntegrationPoint(self, element):
if (element.name == "annotation-xml" and
element.namespace == namespaces["mathml"]):
return ("encoding" in element.attributes and
element.attributes["encoding"].translate(
asciiUpper2Lower) in
("text/html", "application/xhtml+xml"))
else:
return (element.namespace, element.name) in htmlIntegrationPointElements
def isMathMLTextIntegrationPoint(self, element):
return (element.namespace, element.name) in mathmlTextIntegrationPointElements
def mainLoop(self):
CharactersToken = tokenTypes["Characters"]
SpaceCharactersToken = tokenTypes["SpaceCharacters"]
StartTagToken = tokenTypes["StartTag"]
EndTagToken = tokenTypes["EndTag"]
CommentToken = tokenTypes["Comment"]
DoctypeToken = tokenTypes["Doctype"]
ParseErrorToken = tokenTypes["ParseError"]
for token in self.normalizedTokens():
new_token = token
while new_token is not None:
currentNode = self.tree.openElements[-1] if self.tree.openElements else None
currentNodeNamespace = currentNode.namespace if currentNode else None
currentNodeName = currentNode.name if currentNode else None
type = new_token["type"]
if type == ParseErrorToken:
self.parseError(new_token["data"], new_token.get("datavars", {}))
new_token = None
else:
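                    # Tree-construction dispatcher: tokens go to the current
                    # phase unless the current node is in a foreign
                    # (MathML/SVG) namespace and none of the spec's
                    # integration-point exceptions below applies.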
if (len(self.tree.openElements) == 0 or
currentNodeNamespace == self.tree.defaultNamespace or
(self.isMathMLTextIntegrationPoint(currentNode) and
((type == StartTagToken and
token["name"] not in frozenset(["mglyph", "malignmark"])) or
type in (CharactersToken, SpaceCharactersToken))) or
(currentNodeNamespace == namespaces["mathml"] and
currentNodeName == "annotation-xml" and
token["name"] == "svg") or
(self.isHTMLIntegrationPoint(currentNode) and
type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
phase = self.phase
else:
phase = self.phases["inForeignContent"]
if type == CharactersToken:
new_token = phase.processCharacters(new_token)
elif type == SpaceCharactersToken:
new_token = phase.processSpaceCharacters(new_token)
elif type == StartTagToken:
new_token = phase.processStartTag(new_token)
elif type == EndTagToken:
new_token = phase.processEndTag(new_token)
elif type == CommentToken:
new_token = phase.processComment(new_token)
elif type == DoctypeToken:
new_token = phase.processDoctype(new_token)
if (type == StartTagToken and token["selfClosing"]
and not token["selfClosingAcknowledged"]):
self.parseError("non-void-element-with-trailing-solidus",
{"name": token["name"]})
# When the loop finishes it's EOF
reprocess = True
phases = []
while reprocess:
phases.append(self.phase)
reprocess = self.phase.processEOF()
if reprocess:
assert self.phase not in phases
def normalizedTokens(self):
for token in self.tokenizer:
yield self.normalizeToken(token)
def parse(self, stream, encoding=None, parseMeta=True, useChardet=True):
"""Parse a HTML document into a well-formed tree
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, innerHTML=False, encoding=encoding,
parseMeta=parseMeta, useChardet=useChardet)
return self.tree.getDocument()
def parseFragment(self, stream, container="div", encoding=None,
parseMeta=False, useChardet=True):
"""Parse a HTML fragment into a well-formed tree fragment
container - name of the element we're setting the innerHTML property
if set to None, default to 'div'
stream - a filelike object or string containing the HTML to be parsed
The optional encoding parameter must be a string that indicates
the encoding. If specified, that encoding will be used,
regardless of any BOM or later declaration (such as in a meta
element)
"""
self._parse(stream, True, container=container, encoding=encoding)
return self.tree.getFragment()
def parseError(self, errorcode="XXX-undefined-error", datavars={}):
# XXX The idea is to make errorcode mandatory.
self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
if self.strict:
raise ParseError(E[errorcode] % datavars)
def normalizeToken(self, token):
""" HTML5 specific normalizations to the token stream """
if token["type"] == tokenTypes["StartTag"]:
token["data"] = dict(token["data"][::-1])
return token
def adjustMathMLAttributes(self, token):
replacements = {"definitionurl": "definitionURL"}
for k, v in replacements.items():
if k in token["data"]:
token["data"][v] = token["data"][k]
del token["data"][k]
def adjustSVGAttributes(self, token):
replacements = {
"attributename": "attributeName",
"attributetype": "attributeType",
"basefrequency": "baseFrequency",
"baseprofile": "baseProfile",
"calcmode": "calcMode",
"clippathunits": "clipPathUnits",
"contentscripttype": "contentScriptType",
"contentstyletype": "contentStyleType",
"diffuseconstant": "diffuseConstant",
"edgemode": "edgeMode",
"externalresourcesrequired": "externalResourcesRequired",
"filterres": "filterRes",
"filterunits": "filterUnits",
"glyphref": "glyphRef",
"gradienttransform": "gradientTransform",
"gradientunits": "gradientUnits",
"kernelmatrix": "kernelMatrix",
"kernelunitlength": "kernelUnitLength",
"keypoints": "keyPoints",
"keysplines": "keySplines",
"keytimes": "keyTimes",
"lengthadjust": "lengthAdjust",
"limitingconeangle": "limitingConeAngle",
"markerheight": "markerHeight",
"markerunits": "markerUnits",
"markerwidth": "markerWidth",
"maskcontentunits": "maskContentUnits",
"maskunits": "maskUnits",
"numoctaves": "numOctaves",
"pathlength": "pathLength",
"patterncontentunits": "patternContentUnits",
"patterntransform": "patternTransform",
"patternunits": "patternUnits",
"pointsatx": "pointsAtX",
"pointsaty": "pointsAtY",
"pointsatz": "pointsAtZ",
"preservealpha": "preserveAlpha",
"preserveaspectratio": "preserveAspectRatio",
"primitiveunits": "primitiveUnits",
"refx": "refX",
"refy": "refY",
"repeatcount": "repeatCount",
"repeatdur": "repeatDur",
"requiredextensions": "requiredExtensions",
"requiredfeatures": "requiredFeatures",
"specularconstant": "specularConstant",
"specularexponent": "specularExponent",
"spreadmethod": "spreadMethod",
"startoffset": "startOffset",
"stddeviation": "stdDeviation",
"stitchtiles": "stitchTiles",
"surfacescale": "surfaceScale",
"systemlanguage": "systemLanguage",
"tablevalues": "tableValues",
"targetx": "targetX",
"targety": "targetY",
"textlength": "textLength",
"viewbox": "viewBox",
"viewtarget": "viewTarget",
"xchannelselector": "xChannelSelector",
"ychannelselector": "yChannelSelector",
"zoomandpan": "zoomAndPan"
}
for originalName in list(token["data"].keys()):
if originalName in replacements:
svgName = replacements[originalName]
token["data"][svgName] = token["data"][originalName]
del token["data"][originalName]
def adjustForeignAttributes(self, token):
replacements = adjustForeignAttributesMap
for originalName in token["data"].keys():
if originalName in replacements:
foreignName = replacements[originalName]
token["data"][foreignName] = token["data"][originalName]
del token["data"][originalName]
def reparseTokenNormal(self, token):
self.parser.phase()
def resetInsertionMode(self):
# The name of this method is mostly historical. (It's also used in the
# specification.)
last = False
newModes = {
"select": "inSelect",
"td": "inCell",
"th": "inCell",
"tr": "inRow",
"tbody": "inTableBody",
"thead": "inTableBody",
"tfoot": "inTableBody",
"caption": "inCaption",
"colgroup": "inColumnGroup",
"table": "inTable",
"head": "inBody",
"body": "inBody",
"frameset": "inFrameset",
"html": "beforeHead"
}
for node in self.tree.openElements[::-1]:
nodeName = node.name
new_phase = None
if node == self.tree.openElements[0]:
assert self.innerHTML
last = True
nodeName = self.innerHTML
# Check for conditions that should only happen in the innerHTML
# case
if nodeName in ("select", "colgroup", "head", "html"):
assert self.innerHTML
if not last and node.namespace != self.tree.defaultNamespace:
continue
if nodeName in newModes:
new_phase = self.phases[newModes[nodeName]]
break
elif last:
new_phase = self.phases["inBody"]
break
self.phase = new_phase
def parseRCDataRawtext(self, token, contentType):
"""Generic RCDATA/RAWTEXT Parsing algorithm
contentType - RCDATA or RAWTEXT
"""
assert contentType in ("RAWTEXT", "RCDATA")
self.tree.insertElement(token)
if contentType == "RAWTEXT":
self.tokenizer.state = self.tokenizer.rawtextState
else:
self.tokenizer.state = self.tokenizer.rcdataState
self.originalPhase = self.phase
self.phase = self.phases["text"]
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
constants.tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in constants.tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html"
or publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//"))
or publicId in
("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html")
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None
or systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//"))
or publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noscript", "noframes", "style"), self.startTagNoScriptNoFramesStyle),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
        self.endTagHandler = utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoScriptNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
# XXX If we implement a parser for which scripting is disabled we need to
# implement this phase.
#
# class InHeadNoScriptPhase(Phase):
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Keep a ref to this for special handling of whitespace in <pre>
self.processSpaceCharactersNonPre = self.processSpaceCharacters
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"details", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
(("noembed", "noframes", "noscript"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
if node1.name != node2.name or node1.namespace != node2.namespace:
return False
elif len(node1.attributes) != len(node2.attributes):
return False
else:
attributes1 = sorted(node1.attributes.items())
attributes2 = sorted(node2.attributes.items())
for attr1, attr2 in zip(attributes1, attributes2):
if attr1 != attr2:
return False
return True
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea")
and not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharacters(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1
or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
def startTagListItem(self, token):
self.parser.framesetOK = False
stopNamesMap = {"li": ["li"],
"dt": ["dt", "dd"],
"dd": ["dt", "dd"]}
stopNames = stopNamesMap[token["name"]]
for node in reversed(self.tree.openElements):
if node.name in stopNames:
self.parser.phase.processEndTag(
impliedTagToken(node.name, "EndTag"))
break
if (node.nameTuple in specialElements and
node.name not in ("address", "div", "p")):
break
if self.tree.elementInScope("p", variant="button"):
self.parser.phase.processEndTag(
impliedTagToken("p", "EndTag"))
self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagA(self, token):
afeAElement = self.tree.elementInActiveFormattingElements("a")
if afeAElement:
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "a", "endName": "a"})
self.endTagFormatting(impliedTagToken("a"))
if afeAElement in self.tree.openElements:
self.tree.openElements.remove(afeAElement)
if afeAElement in self.tree.activeFormattingElements:
self.tree.activeFormattingElements.remove(afeAElement)
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagNobr(self, token):
self.tree.reconstructActiveFormattingElements()
if self.tree.elementInScope("nobr"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "nobr", "endName": "nobr"})
self.processEndTag(impliedTagToken("nobr"))
# XXX Need tests that trigger the following
self.tree.reconstructActiveFormattingElements()
self.addFormattingElement(token)
def startTagButton(self, token):
if self.tree.elementInScope("button"):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "button", "endName": "button"})
self.processEndTag(impliedTagToken("button"))
return token
else:
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagInput(self, token):
framesetOK = self.parser.framesetOK
self.startTagVoidFormatting(token)
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
# input type=hidden doesn't change framesetOK
self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
def startTagIsIndex(self, token):
self.parser.parseError("deprecated-tag", {"name": "isindex"})
if self.tree.formPointer:
return
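            # Per the HTML spec, <isindex> is not inserted directly: the
            # parser re-emits it below as an equivalent
            # <form><hr><label>prompt<input name=isindex></label><hr></form>
            # sequence of synthetic tokens.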
form_attrs = {}
if "action" in token["data"]:
form_attrs["action"] = token["data"]["action"]
self.processStartTag(impliedTagToken("form", "StartTag",
attributes=form_attrs))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processStartTag(impliedTagToken("label", "StartTag"))
# XXX Localization ...
if "prompt" in token["data"]:
prompt = token["data"]["prompt"]
else:
prompt = "This is a searchable index. Enter search keywords: "
self.processCharacters(
{"type": tokenTypes["Characters"], "data": prompt})
attributes = token["data"].copy()
if "action" in attributes:
del attributes["action"]
if "prompt" in attributes:
del attributes["prompt"]
attributes["name"] = "isindex"
self.processStartTag(impliedTagToken("input", "StartTag",
attributes=attributes,
selfClosing=token["selfClosing"]))
self.processEndTag(impliedTagToken("label"))
self.processStartTag(impliedTagToken("hr", "StartTag"))
self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
def startTagIFrame(self, token):
self.parser.framesetOK = False
self.startTagRawtext(token)
def startTagRawtext(self, token):
"""iframe, noembed noframes, noscript(if scripting enabled)"""
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
def startTagSelect(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.parser.framesetOK = False
if self.parser.phase in (self.parser.phases["inTable"],
self.parser.phases["inCaption"],
self.parser.phases["inColumnGroup"],
self.parser.phases["inTableBody"],
self.parser.phases["inRow"],
self.parser.phases["inCell"]):
self.parser.phase = self.parser.phases["inSelectInTable"]
else:
self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
def startTagMath(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustMathMLAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["mathml"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagSvg(self, token):
self.tree.reconstructActiveFormattingElements()
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = namespaces["svg"]
self.tree.insertElement(token)
# Need to get the parse error right for the case where the token
# has a namespace not equal to the xmlns attribute
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
def endTagP(self, token):
if not self.tree.elementInScope("p", variant="button"):
self.startTagCloseP(impliedTagToken("p", "StartTag"))
self.parser.parseError("unexpected-end-tag", {"name": "p"})
self.endTagP(impliedTagToken("p", "EndTag"))
else:
self.tree.generateImpliedEndTags("p")
if self.tree.openElements[-1].name != "p":
self.parser.parseError("unexpected-end-tag", {"name": "p"})
node = self.tree.openElements.pop()
while node.name != "p":
node = self.tree.openElements.pop()
def endTagBody(self, token):
if not self.tree.elementInScope("body"):
self.parser.parseError()
return
elif self.tree.openElements[-1].name != "body":
for node in self.tree.openElements[2:]:
if node.name not in frozenset(("dd", "dt", "li", "optgroup",
"option", "p", "rp", "rt",
"tbody", "td", "tfoot",
"th", "thead", "tr", "body",
"html")):
# Not sure this is the correct name for the parse error
self.parser.parseError(
"expected-one-end-tag-but-got-another",
{"expectedName": "body", "gotName": node.name})
break
self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
def endTagBlock(self, token):
# Put us back in the right whitespace handling mode
if token["name"] == "pre":
self.processSpaceCharacters = self.processSpaceCharactersNonPre
inScope = self.tree.elementInScope(token["name"])
if inScope:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if inScope:
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagForm(self, token):
node = self.tree.formPointer
self.tree.formPointer = None
if node is None or not self.tree.elementInScope(node):
self.parser.parseError("unexpected-end-tag",
{"name": "form"})
else:
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1] != node:
self.parser.parseError("end-tag-too-early-ignored",
{"name": "form"})
self.tree.openElements.remove(node)
def endTagListItem(self, token):
if token["name"] == "li":
variant = "list"
else:
variant = None
if not self.tree.elementInScope(token["name"], variant=variant):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
else:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError(
"end-tag-too-early",
{"name": token["name"]})
node = self.tree.openElements.pop()
while node.name != token["name"]:
node = self.tree.openElements.pop()
def endTagHeading(self, token):
for item in headingElements:
if self.tree.elementInScope(item):
self.tree.generateImpliedEndTags()
break
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
for item in headingElements:
if self.tree.elementInScope(item):
item = self.tree.openElements.pop()
while item.name not in headingElements:
item = self.tree.openElements.pop()
break
def endTagFormatting(self, token):
"""The much-feared adoption agency algorithm"""
# http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
# XXX Better parseError messages appreciated.
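            # In outline: up to eight times, locate the misnested formatting
            # element, find the "furthest block" below it in the open-elements
            # stack, re-home the nodes between them (cloning formatting
            # elements along the way), then reinsert a clone of the formatting
            # element inside the furthest block.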
# Step 1
outerLoopCounter = 0
# Step 2
while outerLoopCounter < 8:
# Step 3
outerLoopCounter += 1
# Step 4:
# Let the formatting element be the last element in
# the list of active formatting elements that:
# - is between the end of the list and the last scope
# marker in the list, if any, or the start of the list
# otherwise, and
# - has the same tag name as the token.
formattingElement = self.tree.elementInActiveFormattingElements(
token["name"])
if (not formattingElement or
(formattingElement in self.tree.openElements and
not self.tree.elementInScope(formattingElement.name))):
# If there is no such node, then abort these steps
# and instead act as described in the "any other
# end tag" entry below.
self.endTagOther(token)
return
# Otherwise, if there is such a node, but that node is
# not in the stack of open elements, then this is a
# parse error; remove the element from the list, and
# abort these steps.
elif formattingElement not in self.tree.openElements:
self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
self.tree.activeFormattingElements.remove(formattingElement)
return
# Otherwise, if there is such a node, and that node is
# also in the stack of open elements, but the element
# is not in scope, then this is a parse error; ignore
# the token, and abort these steps.
elif not self.tree.elementInScope(formattingElement.name):
self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
return
# Otherwise, there is a formatting element and that
# element is in the stack and is in scope. If the
# element is not the current node, this is a parse
# error. In any case, proceed with the algorithm as
# written in the following steps.
else:
if formattingElement != self.tree.openElements[-1]:
self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
# Step 5:
# Let the furthest block be the topmost node in the
# stack of open elements that is lower in the stack
# than the formatting element, and is an element in
# the special category. There might not be one.
afeIndex = self.tree.openElements.index(formattingElement)
furthestBlock = None
for element in self.tree.openElements[afeIndex:]:
if element.nameTuple in specialElements:
furthestBlock = element
break
# Step 6:
# If there is no furthest block, then the UA must
# first pop all the nodes from the bottom of the stack
# of open elements, from the current node up to and
# including the formatting element, then remove the
# formatting element from the list of active
# formatting elements, and finally abort these steps.
if furthestBlock is None:
element = self.tree.openElements.pop()
while element != formattingElement:
element = self.tree.openElements.pop()
self.tree.activeFormattingElements.remove(element)
return
# Step 7
commonAncestor = self.tree.openElements[afeIndex - 1]
# Step 8:
# The bookmark is supposed to help us identify where to reinsert
# nodes in step 15. We have to ensure that we reinsert nodes after
# the node before the active formatting element. Note the bookmark
# can move in step 9.7
bookmark = self.tree.activeFormattingElements.index(formattingElement)
# Step 9
lastNode = node = furthestBlock
innerLoopCounter = 0
index = self.tree.openElements.index(node)
while innerLoopCounter < 3:
innerLoopCounter += 1
# Node is element before node in open elements
index -= 1
node = self.tree.openElements[index]
if node not in self.tree.activeFormattingElements:
self.tree.openElements.remove(node)
continue
# Step 9.6
if node == formattingElement:
break
# Step 9.7
if lastNode == furthestBlock:
bookmark = self.tree.activeFormattingElements.index(node) + 1
# Step 9.8
clone = node.cloneNode()
# Replace node with clone
self.tree.activeFormattingElements[
self.tree.activeFormattingElements.index(node)] = clone
self.tree.openElements[
self.tree.openElements.index(node)] = clone
node = clone
# Step 9.9
# Remove lastNode from its parents, if any
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
node.appendChild(lastNode)
# Step 9.10
lastNode = node
# Step 10
# Foster parent lastNode if commonAncestor is a
# table, tbody, tfoot, thead, or tr we need to foster
# parent the lastNode
if lastNode.parent:
lastNode.parent.removeChild(lastNode)
if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
parent, insertBefore = self.tree.getTableMisnestedNodePosition()
parent.insertBefore(lastNode, insertBefore)
else:
commonAncestor.appendChild(lastNode)
# Step 11
clone = formattingElement.cloneNode()
# Step 12
furthestBlock.reparentChildren(clone)
# Step 13
furthestBlock.appendChild(clone)
# Step 14
self.tree.activeFormattingElements.remove(formattingElement)
self.tree.activeFormattingElements.insert(bookmark, clone)
# Step 15
self.tree.openElements.remove(formattingElement)
self.tree.openElements.insert(
self.tree.openElements.index(furthestBlock) + 1, clone)
def endTagAppletMarqueeObject(self, token):
if self.tree.elementInScope(token["name"]):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("end-tag-too-early", {"name": token["name"]})
if self.tree.elementInScope(token["name"]):
element = self.tree.openElements.pop()
while element.name != token["name"]:
element = self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
def endTagOther(self, token):
for node in self.tree.openElements[::-1]:
if node.name == token["name"]:
self.tree.generateImpliedEndTags(exclude=token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while self.tree.openElements.pop() != node:
pass
break
else:
if node.nameTuple in specialElements:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
break
class TextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("script", self.endTagScript)])
self.endTagHandler.default = self.endTagOther
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processEOF(self):
self.parser.parseError("expected-named-closing-tag-but-got-eof",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
return True
def startTagOther(self, token):
assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
def endTagScript(self, token):
node = self.tree.openElements.pop()
assert node.name == "script"
self.parser.phase = self.parser.originalPhase
# The rest of this method is all stuff that only happens if
# document.write works
def endTagOther(self, token):
self.tree.openElements.pop()
self.parser.phase = self.parser.originalPhase
class InTablePhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("caption", self.startTagCaption),
("colgroup", self.startTagColgroup),
("col", self.startTagCol),
(("tbody", "tfoot", "thead"), self.startTagRowGroup),
(("td", "th", "tr"), self.startTagImplyTbody),
("table", self.startTagTable),
(("style", "script"), self.startTagStyleScript),
("input", self.startTagInput),
("form", self.startTagForm)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "tbody", "td",
"tfoot", "th", "thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableContext(self):
# "clear the stack back to a table context"
while self.tree.openElements[-1].name not in ("table", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
# When the current node is <html> it's an innerHTML case
# processing methods
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-table")
else:
assert self.parser.innerHTML
# Stop parsing
def processSpaceCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processSpaceCharacters(token)
def processCharacters(self, token):
originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["inTableText"]
self.parser.phase.originalPhase = originalPhase
self.parser.phase.processCharacters(token)
def insertText(self, token):
# If we get here there must be at least one non-whitespace character
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processCharacters(token)
self.tree.insertFromTable = False
def startTagCaption(self, token):
self.clearStackToTableContext()
self.tree.activeFormattingElements.append(Marker)
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCaption"]
def startTagColgroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inColumnGroup"]
def startTagCol(self, token):
self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
return token
def startTagRowGroup(self, token):
self.clearStackToTableContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inTableBody"]
def startTagImplyTbody(self, token):
self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
return token
def startTagTable(self, token):
self.parser.parseError("unexpected-start-tag-implies-end-tag",
{"startName": "table", "endName": "table"})
self.parser.phase.processEndTag(impliedTagToken("table"))
if not self.parser.innerHTML:
return token
def startTagStyleScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagInput(self, token):
if ("type" in token["data"] and
token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
self.parser.parseError("unexpected-hidden-input-in-table")
self.tree.insertElement(token)
# XXX associate with form
self.tree.openElements.pop()
else:
self.startTagOther(token)
def startTagForm(self, token):
self.parser.parseError("unexpected-form-in-table")
if self.tree.formPointer is None:
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
self.tree.openElements.pop()
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processStartTag(token)
self.tree.insertFromTable = False
def endTagTable(self, token):
if self.tree.elementInScope("table", variant="table"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "table":
self.parser.parseError("end-tag-too-early-named",
{"gotName": "table",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "table":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
# Do the table magic!
self.tree.insertFromTable = True
self.parser.phases["inBody"].processEndTag(token)
self.tree.insertFromTable = False
class InTableTextPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.originalPhase = None
self.characterTokens = []
def flushCharacters(self):
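            # Buffered character tokens are flushed as a single run: if the
            # run contains any non-space character it is foster-parented via
            # the inTable phase; a pure-whitespace run is inserted in place.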
data = "".join([item["data"] for item in self.characterTokens])
if any([item not in spaceCharacters for item in data]):
token = {"type": tokenTypes["Characters"], "data": data}
self.parser.phases["inTable"].insertText(token)
elif data:
self.tree.insertText(data)
self.characterTokens = []
def processComment(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEOF(self):
self.flushCharacters()
self.parser.phase = self.originalPhase
return True
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.characterTokens.append(token)
def processSpaceCharacters(self, token):
# pretty sure we should never reach here
self.characterTokens.append(token)
# assert False
def processStartTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
def processEndTag(self, token):
self.flushCharacters()
self.parser.phase = self.originalPhase
return token
class InCaptionPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-caption
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableElement)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("caption", self.endTagCaption),
("table", self.endTagTable),
(("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagCaption(self):
return not self.tree.elementInScope("caption", variant="table")
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableElement(self, token):
self.parser.parseError()
# XXX Have to duplicate logic here to find out if the tag is ignored
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagCaption(self, token):
if not self.ignoreEndTagCaption():
# AT this code is quite similar to endTagTable in "InTable"
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "caption":
self.parser.parseError("expected-one-end-tag-but-got-another",
{"gotName": "caption",
"expectedName": self.tree.openElements[-1].name})
while self.tree.openElements[-1].name != "caption":
self.tree.openElements.pop()
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inTable"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
self.parser.parseError()
ignoreEndTag = self.ignoreEndTagCaption()
self.parser.phase.processEndTag(impliedTagToken("caption"))
if not ignoreEndTag:
return token
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InColumnGroupPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-column
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("col", self.startTagCol)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("colgroup", self.endTagColgroup),
("col", self.endTagCol)
])
self.endTagHandler.default = self.endTagOther
def ignoreEndTagColgroup(self):
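            # The current node can be "html" here only in the fragment
            # (innerHTML) case, where a </colgroup> end tag must be ignored.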
return self.tree.openElements[-1].name == "html"
def processEOF(self):
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
return
else:
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return True
def processCharacters(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def startTagCol(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
def endTagColgroup(self, token):
if self.ignoreEndTagColgroup():
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
else:
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
def endTagCol(self, token):
self.parser.parseError("no-end-tag", {"name": "col"})
def endTagOther(self, token):
ignoreEndTag = self.ignoreEndTagColgroup()
self.endTagColgroup(impliedTagToken("colgroup"))
if not ignoreEndTag:
return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-row
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead",
"tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("tr", self.endTagTr),
("table", self.endTagTable),
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
(("body", "caption", "col", "colgroup", "html", "td", "th"),
self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods (XXX unify this with other table helper methods)
def clearStackToTableRowContext(self):
while self.tree.openElements[-1].name not in ("tr", "html"):
self.parser.parseError("unexpected-implied-end-tag-in-table-row",
{"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
def ignoreEndTagTr(self):
return not self.tree.elementInScope("tr", variant="table")
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTableCell(self, token):
self.clearStackToTableRowContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inCell"]
self.tree.activeFormattingElements.append(Marker)
def startTagTableOther(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTr(self, token):
if not self.ignoreEndTagTr():
self.clearStackToTableRowContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTableBody"]
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagTable(self, token):
ignoreEndTag = self.ignoreEndTagTr()
self.endTagTr(impliedTagToken("tr"))
# Reprocess the current tag if the tr end tag was not ignored
# XXX how are we sure it's always ignored in the innerHTML case?
if not ignoreEndTag:
return token
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagTr(impliedTagToken("tr"))
return token
else:
self.parser.parseError()
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag-in-table-row",
{"name": token["name"]})
def endTagOther(self, token):
return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-cell
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
(("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
"thead", "tr"), self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("td", "th"), self.endTagTableCell),
(("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
(("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
])
self.endTagHandler.default = self.endTagOther
# helper
def closeCell(self):
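            # At most one cell (td or th) can be open in table scope, so a
            # single implied end tag suffices before the next cell opens.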
if self.tree.elementInScope("td", variant="table"):
self.endTagTableCell(impliedTagToken("td"))
elif self.tree.elementInScope("th", variant="table"):
self.endTagTableCell(impliedTagToken("th"))
# the rest
def processEOF(self):
self.parser.phases["inBody"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inBody"].processCharacters(token)
def startTagTableOther(self, token):
if (self.tree.elementInScope("td", variant="table") or
self.tree.elementInScope("th", variant="table")):
self.closeCell()
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def endTagTableCell(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.tree.generateImpliedEndTags(token["name"])
if self.tree.openElements[-1].name != token["name"]:
self.parser.parseError("unexpected-cell-end-tag",
{"name": token["name"]})
while True:
node = self.tree.openElements.pop()
if node.name == token["name"]:
break
else:
self.tree.openElements.pop()
self.tree.clearActiveFormattingElements()
self.parser.phase = self.parser.phases["inRow"]
else:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagIgnore(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def endTagImply(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.closeCell()
return token
else:
# sometimes innerHTML case
self.parser.parseError()
def endTagOther(self, token):
return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("option", self.startTagOption),
("optgroup", self.startTagOptgroup),
("select", self.startTagSelect),
(("input", "keygen", "textarea"), self.startTagInput),
("script", self.startTagScript)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("option", self.endTagOption),
("optgroup", self.endTagOptgroup),
("select", self.endTagSelect)
])
self.endTagHandler.default = self.endTagOther
# http://www.whatwg.org/specs/web-apps/current-work/#in-select
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-select")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
if token["data"] == "\u0000":
return
self.tree.insertText(token["data"])
def startTagOption(self, token):
# We need to imply </option> if <option> is the current node.
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagOptgroup(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
self.tree.insertElement(token)
def startTagSelect(self, token):
self.parser.parseError("unexpected-select-in-select")
self.endTagSelect(impliedTagToken("select"))
def startTagInput(self, token):
self.parser.parseError("unexpected-input-in-select")
if self.tree.elementInScope("select", variant="select"):
self.endTagSelect(impliedTagToken("select"))
return token
else:
assert self.parser.innerHTML
def startTagScript(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-select",
{"name": token["name"]})
def endTagOption(self, token):
if self.tree.openElements[-1].name == "option":
self.tree.openElements.pop()
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "option"})
def endTagOptgroup(self, token):
# </optgroup> implicitly closes <option>
if (self.tree.openElements[-1].name == "option" and
self.tree.openElements[-2].name == "optgroup"):
self.tree.openElements.pop()
            # It also closes the <optgroup> element
if self.tree.openElements[-1].name == "optgroup":
self.tree.openElements.pop()
# But nothing else
else:
self.parser.parseError("unexpected-end-tag-in-select",
{"name": "optgroup"})
def endTagSelect(self, token):
if self.tree.elementInScope("select", variant="select"):
node = self.tree.openElements.pop()
while node.name != "select":
node = self.tree.openElements.pop()
self.parser.resetInsertionMode()
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-select",
{"name": token["name"]})
class InSelectInTablePhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.startTagTable)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
(("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
self.endTagTable)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.phases["inSelect"].processEOF()
def processCharacters(self, token):
return self.parser.phases["inSelect"].processCharacters(token)
def startTagTable(self, token):
self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
self.endTagOther(impliedTagToken("select"))
return token
def startTagOther(self, token):
return self.parser.phases["inSelect"].processStartTag(token)
def endTagTable(self, token):
self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
if self.tree.elementInScope(token["name"], variant="table"):
self.endTagOther(impliedTagToken("select"))
return token
def endTagOther(self, token):
return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
"center", "code", "dd", "div", "dl", "dt",
"em", "embed", "h1", "h2", "h3",
"h4", "h5", "h6", "head", "hr", "i", "img",
"li", "listing", "menu", "meta", "nobr",
"ol", "p", "pre", "ruby", "s", "small",
"span", "strong", "strike", "sub", "sup",
"table", "tt", "u", "ul", "var"])
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
def adjustSVGTagNames(self, token):
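            # SVG element names are case-sensitive, but the HTML tokenizer
            # lowercases all tag names; restore the canonical camelCase forms
            # listed in the spec.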
replacements = {"altglyph": "altGlyph",
"altglyphdef": "altGlyphDef",
"altglyphitem": "altGlyphItem",
"animatecolor": "animateColor",
"animatemotion": "animateMotion",
"animatetransform": "animateTransform",
"clippath": "clipPath",
"feblend": "feBlend",
"fecolormatrix": "feColorMatrix",
"fecomponenttransfer": "feComponentTransfer",
"fecomposite": "feComposite",
"feconvolvematrix": "feConvolveMatrix",
"fediffuselighting": "feDiffuseLighting",
"fedisplacementmap": "feDisplacementMap",
"fedistantlight": "feDistantLight",
"feflood": "feFlood",
"fefunca": "feFuncA",
"fefuncb": "feFuncB",
"fefuncg": "feFuncG",
"fefuncr": "feFuncR",
"fegaussianblur": "feGaussianBlur",
"feimage": "feImage",
"femerge": "feMerge",
"femergenode": "feMergeNode",
"femorphology": "feMorphology",
"feoffset": "feOffset",
"fepointlight": "fePointLight",
"fespecularlighting": "feSpecularLighting",
"fespotlight": "feSpotLight",
"fetile": "feTile",
"feturbulence": "feTurbulence",
"foreignobject": "foreignObject",
"glyphref": "glyphRef",
"lineargradient": "linearGradient",
"radialgradient": "radialGradient",
"textpath": "textPath"}
if token["name"] in replacements:
token["name"] = replacements[token["name"]]
def processCharacters(self, token):
if token["data"] == "\u0000":
token["data"] = "\uFFFD"
elif (self.parser.framesetOK and
any(char not in spaceCharacters for char in token["data"])):
self.parser.framesetOK = False
Phase.processCharacters(self, token)
def processStartTag(self, token):
currentNode = self.tree.openElements[-1]
if (token["name"] in self.breakoutElements or
(token["name"] == "font" and
set(token["data"].keys()) & set(["color", "face", "size"]))):
self.parser.parseError("unexpected-html-element-in-foreign-content",
{"name": token["name"]})
while (self.tree.openElements[-1].namespace !=
self.tree.defaultNamespace and
not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
self.tree.openElements.pop()
return token
else:
if currentNode.namespace == namespaces["mathml"]:
self.parser.adjustMathMLAttributes(token)
elif currentNode.namespace == namespaces["svg"]:
self.adjustSVGTagNames(token)
self.parser.adjustSVGAttributes(token)
self.parser.adjustForeignAttributes(token)
token["namespace"] = currentNode.namespace
self.tree.insertElement(token)
if token["selfClosing"]:
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def processEndTag(self, token):
nodeIndex = len(self.tree.openElements) - 1
node = self.tree.openElements[-1]
if node.name != token["name"]:
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
while True:
if node.name.translate(asciiUpper2Lower) == token["name"]:
# XXX this isn't in the spec but it seems necessary
if self.parser.phase == self.parser.phases["inTableText"]:
self.parser.phase.flushCharacters()
self.parser.phase = self.parser.phase.originalPhase
while self.tree.openElements.pop() != node:
assert self.tree.openElements
new_token = None
break
nodeIndex -= 1
node = self.tree.openElements[nodeIndex]
if node.namespace != self.tree.defaultNamespace:
continue
else:
new_token = self.parser.phase.processEndTag(token)
break
return new_token
class AfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([("html", self.endTagHtml)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processComment(self, token):
# This is needed because data is to be appended to the <html> element
# here and not to whatever is currently open.
self.tree.insertComment(token, self.tree.openElements[0])
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-body")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def endTagHtml(self, name):
if self.parser.innerHTML:
self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
else:
self.parser.phase = self.parser.phases["afterAfterBody"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-body",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class InFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("frameset", self.startTagFrameset),
("frame", self.startTagFrame),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("frameset", self.endTagFrameset)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
if self.tree.openElements[-1].name != "html":
self.parser.parseError("eof-in-frameset")
else:
assert self.parser.innerHTML
def processCharacters(self, token):
self.parser.parseError("unexpected-char-in-frameset")
def startTagFrameset(self, token):
self.tree.insertElement(token)
def startTagFrame(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
def startTagNoframes(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-in-frameset",
{"name": token["name"]})
def endTagFrameset(self, token):
if self.tree.openElements[-1].name == "html":
# innerHTML case
self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
else:
self.tree.openElements.pop()
if (not self.parser.innerHTML and
self.tree.openElements[-1].name != "frameset"):
                # If we're not in innerHTML mode and the current node is not a
# "frameset" element (anymore) then switch.
self.parser.phase = self.parser.phases["afterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-in-frameset",
{"name": token["name"]})
class AfterFramesetPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#after3
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoframes)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = utils.MethodDispatcher([
("html", self.endTagHtml)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
# Stop parsing
pass
def processCharacters(self, token):
self.parser.parseError("unexpected-char-after-frameset")
def startTagNoframes(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("unexpected-start-tag-after-frameset",
{"name": token["name"]})
def endTagHtml(self, token):
self.parser.phase = self.parser.phases["afterAfterFrameset"]
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag-after-frameset",
{"name": token["name"]})
class AfterAfterBodyPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
self.parser.phase = self.parser.phases["inBody"]
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
self.parser.phase = self.parser.phases["inBody"]
return token
class AfterAfterFramesetPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = utils.MethodDispatcher([
("html", self.startTagHtml),
("noframes", self.startTagNoFrames)
])
self.startTagHandler.default = self.startTagOther
def processEOF(self):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
return self.parser.phases["inBody"].processSpaceCharacters(token)
def processCharacters(self, token):
self.parser.parseError("expected-eof-but-got-char")
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagNoFrames(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagOther(self, token):
self.parser.parseError("expected-eof-but-got-start-tag",
{"name": token["name"]})
def processEndTag(self, token):
self.parser.parseError("expected-eof-but-got-end-tag",
{"name": token["name"]})
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
# XXX "inHeadNoscript": InHeadNoScriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
}
def impliedTagToken(name, type="EndTag", attributes=None,
selfClosing=False):
if attributes is None:
attributes = {}
return {"type": tokenTypes[type], "name": name, "data": attributes,
"selfClosing": selfClosing}
class ParseError(Exception):
"""Error in parsed document"""
pass
| gpl-2.0 |
StudyBlue/sblibs | sblibs/display/general.py | 1 | 1801 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © Manoel Vilela 2016
#
# @project: Decorating
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
# pylint: disable=redefined-builtin
# pylint: disable=invalid-name
"""
A collection of useful decorators for debugging
and timing of function flow
"""
# stdlib
from functools import wraps
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2: # pragma: no cover
from itertools import izip
zip = izip
else: # pragma: no cover
zip = zip
def with_metaclass(meta, *bases):
"""Create a base class with a metaclass."""
# This requires a bit of explanation: the basic idea is to make a dummy
# metaclass for one level of class instantiation that replaces itself with
# the actual metaclass.
# Copied from `six' library.
# Copyright (c) 2010-2015 Benjamin Peterson
# License: MIT
class metaclass(meta):
"""Dummy metaclass"""
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, 'temporary_class', (), {})
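# Illustrative usage (not part of the original module):
#
#     class MyBase(with_metaclass(SomeMeta, object)):
#         pass
#
# MyBase is created by SomeMeta, and subclasses inherit that metaclass.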
def cache(function):
"""
Function: cache
    Summary: Decorator used to cache the input -> output mapping
    Examples: a memoized fib executes in O(1) time per repeated call
              instead of exponential time
Attributes:
@param (function): function
Returns: wrapped function
TODO: Give support to functions with kwargs
"""
memory = {}
miss = object()
@wraps(function)
def _wrapper(*args):
result = memory.get(args, miss)
if result is miss:
_wrapper.call += 1
result = function(*args)
memory[args] = result
return result
_wrapper.call = 0
return _wrapper
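# Illustrative usage (assumed, not part of the original module):
#
#     @cache
#     def fib(n):
#         return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#     fib(30)
#     fib.call  # number of cache misses, i.e. actual computations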
| bsd-2-clause |
Antiun/odoomrp-wip | machine_purchase/models/account_invoice.py | 29 | 2548 |
from openerp import models, fields, api
class AccountInvoice(models.Model):
_inherit = 'account.invoice'
@api.multi
def invoice_validate(self):
res = super(AccountInvoice, self).invoice_validate()
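        # For supplier invoices, create one machinery record per unit of each
        # machine-type product and, when possible, match it to a picking pack
        # operation so the serial/lot can be copied over.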
machinery_obj = self.env['machinery']
purchase_obj = self.env['purchase.order']
for invoice in self:
if invoice.invoice_line and invoice.type == 'in_invoice':
                purchase_lst = purchase_obj.search(
                    [('invoice_method', '=', 'picking'),
                     ('invoice_ids', '=', invoice.id)])
operation_ids = []
if purchase_lst:
for purchase in purchase_lst:
for picking in purchase.picking_ids:
if picking.pack_operation_ids:
operation_ids.extend(
picking.pack_operation_ids)
                # Discard operations already assigned to a machine or covering
                # more than one unit; iterate over a copy because the list is
                # mutated while looping.
                if operation_ids:
                    for operation in list(operation_ids):
                        if operation.machine or operation.product_qty > 1:
                            operation_ids.remove(operation)
for inv_line in invoice.invoice_line:
if inv_line.product_id.product_tmpl_id.machine_ok:
purchase_date = (invoice.date_invoice or
fields.Date.today())
for x in range(0, int(inv_line.quantity)):
vals = {'name': inv_line.product_id.name,
'product': inv_line.product_id.id,
'purch_inv': invoice.id,
'purch_partner': invoice.partner_id.id,
'purch_date': purchase_date,
'purch_cost': inv_line.price_unit,
'purch_inv_line': inv_line.id
}
machine = machinery_obj.create(vals)
for operation in operation_ids:
if machine.product == operation.product_id:
operation.machine = machine
machine.serial = operation.lot_id
operation_ids.remove(operation)
break
return res
| agpl-3.0 |
initChan/PredictionIO | examples/scala-parallel-similarproduct/filterbyyear/data/import_eventserver.py | 142 | 1844 | """
Import sample data for similar product engine
"""
import predictionio
import argparse
import random
SEED = 3
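# A fixed seed keeps the randomly generated sample data reproducible
# across runs of this script.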
def import_events(client):
random.seed(SEED)
count = 0
print client.get_status()
print "Importing data..."
# generate 10 users, with user ids u1,u2,....,u10
user_ids = ["u%s" % i for i in range(1, 11)]
for user_id in user_ids:
print "Set user", user_id
client.create_event(
event="$set",
entity_type="user",
entity_id=user_id
)
count += 1
# generate 50 items, with item ids i1,i2,....,i50
# random assign 1 to 4 categories among c1-c6 to items
categories = ["c%s" % i for i in range(1, 7)]
item_ids = ["i%s" % i for i in range(1, 51)]
for item_id in item_ids:
print "Set item", item_id
client.create_event(
event="$set",
entity_type="item",
entity_id=item_id,
properties={
"categories" : random.sample(categories, random.randint(1, 4))
}
)
count += 1
# each user randomly viewed 10 items
for user_id in user_ids:
for viewed_item in random.sample(item_ids, 10):
print "User", user_id ,"views item", viewed_item
client.create_event(
event="view",
entity_type="user",
entity_id=user_id,
target_entity_type="item",
target_entity_id=viewed_item
)
count += 1
print "%s events are imported." % count
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Import sample data for similar product engine")
  parser.add_argument('--access_key', default='invalid_access_key')
parser.add_argument('--url', default="http://localhost:7070")
args = parser.parse_args()
print args
client = predictionio.EventClient(
access_key=args.access_key,
url=args.url,
threads=5,
qsize=500)
import_events(client)
| apache-2.0 |
polzy/PokeManager | pogo/POGOProtos/Networking/Responses/AddFortModifierResponse_pb2.py | 6 | 1927 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/AddFortModifierResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/AddFortModifierResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n=POGOProtos/Networking/Responses/AddFortModifierResponse.proto\x12\x1fPOGOProtos.Networking.Responses\"\x19\n\x17\x41\x64\x64\x46ortModifierResponseb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_ADDFORTMODIFIERRESPONSE = _descriptor.Descriptor(
name='AddFortModifierResponse',
full_name='POGOProtos.Networking.Responses.AddFortModifierResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=123,
)
DESCRIPTOR.message_types_by_name['AddFortModifierResponse'] = _ADDFORTMODIFIERRESPONSE
AddFortModifierResponse = _reflection.GeneratedProtocolMessageType('AddFortModifierResponse', (_message.Message,), dict(
DESCRIPTOR = _ADDFORTMODIFIERRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.AddFortModifierResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.AddFortModifierResponse)
))
_sym_db.RegisterMessage(AddFortModifierResponse)
# @@protoc_insertion_point(module_scope)
| mit |
Donkyhotay/MoonPy | zope/wfmc/adapter/integration.py | 1 | 1606 | ##############################################################################
#
# Copyright (c) 2004 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Processes
$Id: integration.py 30314 2005-05-09 17:07:09Z jim $
"""
from zope import component, interface
from zope.wfmc import interfaces
interface.moduleProvides(interfaces.IIntegration)
def createParticipant(activity, process_definition_identifier, performer):
participant = component.queryAdapter(
activity, interfaces.IParticipant,
process_definition_identifier + '.' + performer)
if participant is None:
participant = component.getAdapter(
activity, interfaces.IParticipant, '.' + performer)
return participant
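# Lookup order illustrated with hypothetical names: for a process definition
# 'Publication' and performer 'author', createParticipant first queries an
# IParticipant adapter named 'Publication.author' and only then falls back
# to the generic '.author' registration.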
def createWorkItem(participant,
process_definition_identifier, application):
workitem = component.queryAdapter(
participant, interfaces.IWorkItem,
process_definition_identifier + '.' + application)
if workitem is None:
workitem = component.getAdapter(
participant, interfaces.IWorkItem, '.' + application)
return workitem
| gpl-3.0 |
inveniosoftware/invenio-ext | invenio_ext/collect/storage/link.py | 5 | 2066 | # -*- coding: utf8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Flask-Collect custom storage for development mode.
It creates symbolic links to the real files so any changes to them will be
reflected.
"""
import os
from flask_collect.storage.base import BaseStorage
class Storage(BaseStorage):
"""Storage that creates symlinks to the resources."""
def run(self):
"""Collect static from blueprints.
Create the directory tree, but symlink all the files.
"""
self.log("Collect static from blueprints")
skipped, total = 0, 0
for bp, f, o in self:
destination = os.path.join(self.collect.static_root, o)
destination_dir = os.path.dirname(destination)
if not os.path.exists(destination_dir):
os.makedirs(destination_dir)
if not os.path.exists(destination):
# the path is a link, but points to an invalid location
if os.path.islink(destination):
os.remove(destination)
os.symlink(f, destination)
self.log("{0}:{1} symbolink link created".format(bp.name, o))
else:
skipped += 1
total += 1
self.log("{0} of {1} files already present".format(skipped, total))
self.log("Done collecting.")
| gpl-2.0 |
sss/calibre-at-bzr | src/cherrypy/_cpchecker.py | 87 | 14739 | import os
import warnings
import cherrypy
from cherrypy._cpcompat import iteritems, copykeys, builtins
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
When this object is called at engine startup, it executes each
of its own methods whose names start with ``check_``. If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False::
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace ``check_*`` methods in this way.
"""
on = True
"""If True (the default), run all checks; if False, turn off all checks."""
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and hasattr(method, '__call__'):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
"""Check for Application config with sections that repeat script_name."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip("/").split("/")
for key in app.config.keys():
key_atoms = key.strip("/").split("/")
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
"The application mounted at %r has config " \
"entries that start with its script name: %r" % (sn, key))
def check_site_config_entries_in_app_config(self):
"""Check for mounted Applications that have site-scoped config."""
for sn, app in iteritems(cherrypy.tree.apps):
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in iteritems(app.config):
if section.startswith('/'):
for key, value in iteritems(entries):
for n in ("engine.", "server.", "tree.", "checker."):
if key.startswith(n):
msg.append("[%s] %s = %s" % (section, key, value))
if msg:
msg.insert(0,
"The application mounted at %r contains the following "
"config entries, which are only allowed in site-wide "
"config. Move them to a [global] section and pass them "
"to cherrypy.config.update() instead of tree.mount()." % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
"""Check for mounted Applications that have no config."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_app_config_brackets(self):
"""Check for Application config with extraneous brackets in section names."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith("[") or key.endswith("]"):
warnings.warn(
"The application mounted at %r has config " \
"section names with extraneous brackets: %r. "
"Config *files* need brackets; config *dicts* "
"(e.g. passed to tree.mount) do not." % (sn, key))
def check_static_paths(self):
"""Check Application config for incorrect static paths."""
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
ns = ["wsgi"]
ns.extend(copykeys(app.toolboxes))
ns.extend(copykeys(app.namespaces))
ns.extend(copykeys(app.request_class.namespaces))
ns.extend(copykeys(cherrypy.config.namespaces))
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.items():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
| gpl-3.0 |
wagnerand/olympia | src/olympia/reviewers/tests/test_review_scenarios.py | 5 | 3491 | """Real life review scenarios.
For different add-on and file statuses, test reviewing them, and make sure they
end up in the correct state.
"""
import pytest
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo.tests import user_factory
from olympia.files.models import File
from olympia.reviewers.utils import ReviewAddon, ReviewFiles, ReviewHelper
from olympia.versions.models import Version
@pytest.fixture
def mock_request(rf, db): # rf is a RequestFactory provided by pytest-django.
request = rf.get('/')
request.user = user_factory()
return request
@pytest.fixture
def addon_with_files(db):
"""Return an add-on with one version and three files.
By default the add-on is public, and the files are: disabled,
unreviewed, unreviewed.
"""
addon = Addon.objects.create(name='My Addon', slug='my-addon')
version = Version.objects.create(addon=addon)
for status in [amo.STATUS_DISABLED,
amo.STATUS_AWAITING_REVIEW, amo.STATUS_AWAITING_REVIEW]:
File.objects.create(version=version, status=status)
return addon
@pytest.mark.parametrize(
'review_action,addon_status,file_status,review_class,review_type,'
'final_addon_status,final_file_status',
[
# New addon request full.
# scenario0: should succeed, files approved.
('process_public', amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW,
ReviewAddon, 'nominated', amo.STATUS_PUBLIC,
amo.STATUS_PUBLIC),
# scenario1: should succeed, files rejected.
('process_sandbox', amo.STATUS_NOMINATED, amo.STATUS_AWAITING_REVIEW,
ReviewAddon, 'nominated', amo.STATUS_NULL,
amo.STATUS_DISABLED),
# Approved addon with a new file.
# scenario2: should succeed, files approved.
('process_public', amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW,
ReviewFiles, 'pending', amo.STATUS_PUBLIC,
amo.STATUS_PUBLIC),
# scenario3: should succeed, files rejected.
('process_sandbox', amo.STATUS_PUBLIC, amo.STATUS_AWAITING_REVIEW,
ReviewFiles, 'pending', amo.STATUS_NULL,
amo.STATUS_DISABLED),
])
def test_review_scenario(mock_request, addon_with_files, review_action,
addon_status, file_status, review_class, review_type,
final_addon_status, final_file_status):
# Setup the addon and files.
addon = addon_with_files
addon.update(status=addon_status)
version = addon.versions.get()
version.files.filter(
status=amo.STATUS_AWAITING_REVIEW).update(status=file_status)
# Get the review helper.
helper = ReviewHelper(mock_request, addon, version)
assert isinstance(helper.handler, review_class)
helper.set_review_handler(mock_request)
assert helper.handler.review_type == review_type
helper.set_data({'comments': 'testing review scenarios'})
# Run the action (process_public, process_sandbox).
try:
getattr(helper.handler, review_action)()
except AssertionError:
# Some scenarios are expected to fail. We don't need to check it here,
# the scenario has the final statuses, and those are the ones we want
# to check.
pass
# Check the final statuses.
assert addon.reload().status == final_addon_status
assert list(version.files.values_list('status', flat=True)) == (
[amo.STATUS_DISABLED, final_file_status, final_file_status])
| bsd-3-clause |
microelly2/geodata | geodat/import_aster.py | 1 | 5208 | ''' geodat import AST (gdal)'''
# -*- coding: utf-8 -*-
#-------------------------------------------------
#-- geodat import AST (gdal)
#--
#-- microelly 2016 v 0.1
#--
#-- GNU Lesser General Public License (LGPL)
#-------------------------------------------------
#http://geoinformaticstutorial.blogspot.de/2012/09/reading-raster-data-with-python-and-gdal.html
#http://forum.freecadweb.org/viewtopic.php?f=8&t=17647&start=10#p139201
# the ast file is expected in ~/.FreeCAD/geodat/AST
# FreeCAD.ConfigGet("UserAppData") +'/geodat/AST/ASTGTM2_' + ff +'_dem.tif'
'''
ASTER GDEM Policy Agreements
I agree to redistribute the ASTER GDEM *only* to individuals within my organization or project of intended use or in response to disasters in support of the GEO Disaster Theme.
When presenting or publishing ASTER GDEM data, I agree to include "ASTER GDEM is a product of METI and NASA."
Because there are known inaccuracies and artifacts in the data set, please use the product with awareness of its limitations. The data are provided "as is" and neither NASA nor METI/ERSDAC will be responsible for any damages resulting from use of the data.
'''
from geodat.say import *
import geodat.transversmercator
from geodat.transversmercator import TransverseMercator
import geodat.import_xyz
import geodat.geodat_lib
# apt-get install python-gdal
import gdal
from gdalconst import *
import WebGui
import Points
def import_ast(b=50.26,l=11.39):
'''get the data from a downloaded file
the file is expected in FreeCAD.ConfigGet("UserAppData") + '/geodat/AST/'
with the common filename for lat/lon parameters
example .../.FreeCAD/geodat/AST/ASTGTM2_N51E010_dem.tif
'''
bs=np.floor(b)
ls=np.floor(l)
# the ast dataset
ff="N%02dE%03d" % (int(bs),int(ls))
fn=FreeCAD.ConfigGet("UserAppData") +'/geodat/AST/ASTGTM2_' + ff +'_dem.tif'
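# e.g. b=50.26, l=11.39 -> bs=50, ls=11 -> ff='N50E011', so the expected
# file is .../geodat/AST/ASTGTM2_N50E011_dem.tif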
print(fn)
'''
fn='/home/microelly2/FCB/b217_heightmaps/tandemx_daten/Chile-Chuquicatmata.tif'
b=-22.3054705
l=-68.9259643
bs=np.floor(b)
ls=np.floor(l)
print(fn)
'''
dataset = gdal.Open(fn, GA_ReadOnly)
if dataset == None:
msg="\nProblem cannot open " + fn + "\n"
FreeCAD.Console.PrintError(msg)
errorDialog(msg)
return
cols=dataset.RasterXSize
rows=dataset.RasterYSize
geotransform = dataset.GetGeoTransform()
originX = geotransform[0]
originY = geotransform[3]
pixelWidth = geotransform[1]
pixelHeight = geotransform[5]
band = dataset.GetRasterBand(1)
data = band.ReadAsArray(0, 0, cols, rows)
#data.shape -> 3601 x 3601 secs
# erfurt 51,11
#data[0,0]
# zeitz 51,12
#data[3600,0]
# windischletten(zapfendorf) 50,11
#data[0,3600]
# troestau fichtelgebirge 50,12
#data[3600,3600]
px=int(round((bs+1-b)*3600))
py=int(round((l-ls)*3600))
pts=[]
d=70
d1=20
d2=50
d1=d
d2=d
tm=TransverseMercator()
tm.lat=b
tm.lon=l
center=tm.fromGeographic(tm.lat,tm.lon)
z0= data[px,py] # relative height to origin px,py
for x in range(px-d1,px+d1):
for y in range(py-d2,py+d2):
ll=tm.fromGeographic(bs+1-1.0/3600*x,ls+1.0/3600*y)
pt=FreeCAD.Vector(ll[0]-center[0],ll[1]-center[1], 1000.0* (data[x,y]-z0))
pts.append(pt)
# display the point cloud
p=Points.Points(pts)
Points.show(p)
return pts
s6='''
MainWindow:
VerticalLayout:
id:'main'
# setFixedHeight: 600
setFixedWidth: 600
move: PySide.QtCore.QPoint(3000,100)
QtGui.QLabel:
setText:"C O N F I G U R A T I O N"
QtGui.QLabel:
QtGui.QLineEdit:
id: 'bl'
# zeyerner wand **
#(50.2570152,11.3818337)
# outdoor inn *
#(50.3737109,11.1891891)
# roethen **
#(50.3902794,11.157629)
# kreuzung huettengrund nach judenbach ***
#(50.368209,11.2016135)
setText:"50.368209,11.2016135"
# coburg zentrum
setText:"50.2639926,10.9686946"
QtGui.QPushButton:
setText: "Create height models"
clicked.connect: app.runbl
QtGui.QPushButton:
setText: "show Map"
clicked.connect: app.showMap
'''
## the gui backend
class MyApp(object):
## create the height model
def runbl(self):
bl=self.root.ids['bl'].text()
spli=bl.split(',')
b=float(spli[0])
l=float(spli[1])
s=15
import_heights(float(b),float(l),float(s))
## display the location in OpenStreetMap
def showMap(self):
bl=self.root.ids['bl'].text()
spli=bl.split(',')
b=float(spli[0])
l=float(spli[1])
s=15
WebGui.openBrowser( "http://www.openstreetmap.org/#map=16/"+str(b)+'/'+str(l))
## the dialog to import a gdal file
def mydialog():
'''the dialog to import a gdal file'''
app=MyApp()
import geodat
import geodat.miki as gmiki
miki=gmiki.Miki()
miki.app=app
app.root=miki
miki.run(s6)
FreeCAD.mm=miki
return miki
## import heights using import_xyz
def import_heights(b,l,s):
ts=time.time()
pcl=import_ast(b,l)
pts=pcl
ff="N" + str(b) + " E" + str(l)
nurbs=geodat.import_xyz.suv2(ff,pts,u=0,v=0,d=140,la=140,lb=140)
te=time.time()
print ("time to create models:",te-ts)
fn=geodat.geodat_lib.genSizeImage(size=512)
# geodat.geodat_lib.addImageTexture(nurbs,fn,scale=(8,3))
nurbs.ViewObject.Selectable = False
## test start and hide the dialog
def runtest():
m=mydialog()
m.objects[0].hide()
if __name__ == '__main__':
runtest()
def importASTER():
mydialog()
| lgpl-3.0 |
xuxiao19910803/edx | lms/djangoapps/courseware/features/video.py | 11 | 20973 | # -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
from lettuce import world, step, before, after
import json
import os
import time
import requests
from nose.tools import assert_equal, assert_true, assert_false
from common import i_am_registered_for_the_course, visit_scenario_item
from django.utils.translation import ugettext as _
from django.conf import settings
from cache_toolbox.core import del_cached_content
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
LANGUAGES = settings.ALL_LANGUAGES
VIDEO_SOURCE_PORT = settings.VIDEO_SOURCE_PORT
############### ACTIONS ####################
HTML5_SOURCES = [
'http://localhost:{0}/gizmo.mp4'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.webm'.format(VIDEO_SOURCE_PORT),
'http://localhost:{0}/gizmo.ogv'.format(VIDEO_SOURCE_PORT),
]
FLASH_SOURCES = {
'youtube_id_1_0': 'OEoXaMPEzfM',
'youtube_id_0_75': 'JMD_ifUUfsU',
'youtube_id_1_25': 'AKqURZnYqpk',
'youtube_id_1_5': 'DYpADpL7jAY',
}
HTML5_SOURCES_INCORRECT = [
'http://localhost:{0}/gizmo.mp99'.format(VIDEO_SOURCE_PORT),
]
VIDEO_BUTTONS = {
'CC': '.hide-subtitles',
'volume': '.volume',
'play': '.video_control.play',
'pause': '.video_control.pause',
'fullscreen': '.add-fullscreen',
'download_transcript': '.video-tracks > a',
'quality': '.quality-control',
}
VIDEO_MENUS = {
'language': '.lang .menu',
'speed': '.speed .menu',
'download_transcript': '.video-tracks .a11y-menu-list',
}
coursenum = 'test_course'
@before.each_scenario
def setUp(scenario):
world.video_sequences = {}
@after.each_scenario
def tearDown(scenario):
world.browser.cookies.delete('edX_video_player_mode')
class RequestHandlerWithSessionId(object):
def get(self, url):
"""
Sends a request.
"""
kwargs = dict()
session_id = [{i['name']:i['value']} for i in world.browser.cookies.all() if i['name'] == u'sessionid']
if session_id:
kwargs.update({
'cookies': session_id[0]
})
response = requests.get(url, **kwargs)
self.response = response
self.status_code = response.status_code
self.headers = response.headers
self.content = response.content
return self
def is_success(self):
"""
Returns `True` if the response succeeded, otherwise returns `False`.
"""
if self.status_code < 400:
return True
return False
def check_header(self, name, value):
"""
Returns `True` if the response header exists and has the appropriate value,
otherwise, returns `False`.
"""
if value in self.headers.get(name, ''):
return True
return False
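# This handler lets the transcript-download steps below fetch protected
# assets with the learner's session, e.g.
#   assert RequestHandlerWithSessionId().get(url).is_success()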
def get_metadata(parent_location, player_mode, data, display_name='Video'):
kwargs = {
'parent_location': parent_location,
'category': 'video',
'display_name': display_name,
'metadata': {},
}
if player_mode == 'html5':
kwargs['metadata'].update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES
})
if player_mode == 'youtube_html5':
kwargs['metadata'].update({
'html5_sources': HTML5_SOURCES,
})
if player_mode == 'youtube_html5_unsupported_video':
kwargs['metadata'].update({
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'html5_unsupported_video':
kwargs['metadata'].update({
'youtube_id_1_0': '',
'youtube_id_0_75': '',
'youtube_id_1_25': '',
'youtube_id_1_5': '',
'html5_sources': HTML5_SOURCES_INCORRECT
})
if player_mode == 'flash':
kwargs['metadata'].update(FLASH_SOURCES)
world.browser.cookies.add({'edX_video_player_mode': 'flash'})
if data:
conversions = {
'transcripts': json.loads,
'download_track': json.loads,
'download_video': json.loads,
}
for key in data:
if key in conversions:
data[key] = conversions[key](data[key])
kwargs['metadata'].update(data)
return kwargs
def add_videos_to_course(course, player_mode=None, display_names=None, hashes=None):
parent_location = add_vertical_to_course(course)
kwargs = {
'course': course,
'parent_location': parent_location,
'player_mode': player_mode,
'display_name': display_names[0],
}
if hashes:
for index, item_data in enumerate(hashes):
kwargs.update({
'display_name': display_names[index],
'data': item_data,
})
add_video_to_course(**kwargs)
else:
add_video_to_course(**kwargs)
def add_video_to_course(course, parent_location=None, player_mode=None, data=None, display_name='Video'):
if not parent_location:
parent_location = add_vertical_to_course(course)
kwargs = get_metadata(parent_location, player_mode, data, display_name=display_name)
world.scenario_dict['VIDEO'] = world.ItemFactory.create(**kwargs)
def add_vertical_to_course(course_num):
world.scenario_dict['LAST_VERTICAL'] = world.ItemFactory.create(
parent_location=world.scenario_dict['SECTION'].location,
category='vertical',
display_name='Test Vertical-{}'.format(len(set(world.video_sequences.values()))),
)
return last_vertical_location(course_num)
def last_vertical_location(course_num):
return world.scenario_dict['LAST_VERTICAL'].location.replace(course=course_num)
def upload_file(filename, location):
path = os.path.join(TEST_ROOT, 'uploads/', filename)
f = open(os.path.abspath(path))
mime_type = "application/json"
content_location = StaticContent.compute_location(
location.course_key, filename
)
content = StaticContent(content_location, filename, mime_type, f.read())
contentstore().save(content)
del_cached_content(content.location)
def navigate_to_an_item_in_a_sequence(number):
sequence_css = '#sequence-list a[data-element="{0}"]'.format(number)
world.css_click(sequence_css)
def change_video_speed(speed):
world.browser.execute_script("$('.speeds').addClass('is-opened')")
speed_css = 'li[data-speed="{0}"] a'.format(speed)
world.wait_for_visible('.speeds')
world.css_click(speed_css)
def open_menu(menu):
world.browser.execute_script("$('{selector}').parent().addClass('is-opened')".format(
selector=VIDEO_MENUS[menu]
))
def get_all_dimensions():
video = get_dimensions('.video-player iframe, .video-player video')
wrapper = get_dimensions('.tc-wrapper')
controls = get_dimensions('.video-controls')
progress_slider = get_dimensions('.video-controls > .slider')
expected = dict(wrapper)
expected['height'] -= controls['height'] + 0.5 * progress_slider['height']
return (video, expected)
def get_dimensions(selector):
element = world.css_find(selector).first
return element._element.size
def get_window_dimensions():
return world.browser.driver.get_window_size()
def set_window_dimensions(width, height):
world.browser.driver.set_window_size(width, height)
# Wait 200 ms when JS finish resizing
world.wait(0.2)
def duration():
"""
Total duration of the video, in seconds.
"""
elapsed_time, duration = video_time()
return duration
def elapsed_time():
"""
Elapsed time of the video, in seconds.
"""
elapsed_time, duration = video_time()
return elapsed_time
def video_time():
"""
Return a tuple `(elapsed_time, duration)`, each in seconds.
"""
# The full time has the form "0:32 / 3:14"
full_time = world.css_text('div.vidtime')
# Split the time at the " / ", to get ["0:32", "3:14"]
elapsed_str, duration_str = full_time.split(' / ')
# Convert each string to seconds
return (parse_time_str(elapsed_str), parse_time_str(duration_str))
def parse_time_str(time_str):
"""
Parse a string of the form 1:23 into seconds (int).
"""
time_obj = time.strptime(time_str, '%M:%S')
return time_obj.tm_min * 60 + time_obj.tm_sec
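# e.g. parse_time_str('1:23') -> 1 * 60 + 23 = 83 seconds.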
def find_caption_line_by_data_index(index):
SELECTOR = ".subtitles > li[data-index='{index}']".format(index=index)
return world.css_find(SELECTOR).first
def wait_for_video():
world.wait_for_present('.is-initialized')
world.wait_for_present('div.vidtime')
world.wait_for_invisible('.video-wrapper .spinner')
world.wait_for_ajax_complete()
@step("I reload the page with video$")
def reload_the_page_with_video(_step):
_step.given('I reload the page')
wait_for_video()
@step('youtube stub server (.*) YouTube API')
def configure_youtube_api(_step, action):
action = action.strip()
if action == 'proxies':
world.youtube.config['youtube_api_blocked'] = False
elif action == 'blocks':
world.youtube.config['youtube_api_blocked'] = True
else:
raise ValueError('Parameter `action` should be one of "proxies" or "blocks".')
@step('when I view the (.*) it does not have autoplay enabled$')
def does_not_autoplay(_step, video_type):
actual = world.css_find('.%s' % video_type)[0]['data-autoplay']
expected = [u'False', u'false', False]
assert actual in expected
@step('the course has a Video component in "([^"]*)" mode(?:\:)?$')
def view_video(_step, player_mode):
i_am_registered_for_the_course(_step, coursenum)
data = _step.hashes[0] if _step.hashes else None
add_video_to_course(coursenum, player_mode=player_mode.lower(), data=data)
visit_scenario_item('SECTION')
wait_for_video()
@step('a video in "([^"]*)" mode(?:\:)?$')
def add_video(_step, player_mode):
data = _step.hashes[0] if _step.hashes else None
add_video_to_course(coursenum, player_mode=player_mode.lower(), data=data)
visit_scenario_item('SECTION')
wait_for_video()
@step('video(?:s)? "([^"]*)" in "([^"]*)" mode in position "([^"]*)" of sequential(?:\:)?$')
def add_video_in_position(_step, video_ids, player_mode, position):
sequences = {video_id.strip(): position for video_id in video_ids.split(',')}
add_videos_to_course(coursenum, player_mode=player_mode.lower(), display_names=sequences.keys(), hashes=_step.hashes)
world.video_sequences.update(sequences)
@step('I open the section with videos$')
def visit_video_section(_step):
visit_scenario_item('SECTION')
wait_for_video()
@step('I select the "([^"]*)" speed$')
def i_select_video_speed(_step, speed):
change_video_speed(speed)
@step('I select the "([^"]*)" speed on video "([^"]*)"$')
def change_video_speed_on_video(_step, speed, player_id):
navigate_to_an_item_in_a_sequence(world.video_sequences[player_id])
change_video_speed(speed)
@step('I open video "([^"]*)"$')
def open_video(_step, player_id):
navigate_to_an_item_in_a_sequence(world.video_sequences[player_id])
@step('video "([^"]*)" should start playing at speed "([^"]*)"$')
def check_video_speed(_step, player_id, speed):
speed_css = '.speeds .value'
assert world.css_has_text(speed_css, '{0}x'.format(speed))
@step('youtube server is up and response time is (.*) seconds$')
def set_youtube_response_timeout(_step, time):
world.youtube.config['time_to_response'] = float(time)
@step('the video has rendered in "([^"]*)" mode$')
def video_is_rendered(_step, mode):
modes = {
'html5': 'video',
'youtube': 'iframe',
'flash': 'iframe',
}
html_tag = modes[mode.lower()]
assert world.css_find('.video {0}'.format(html_tag)).first
@step('videos have rendered in "([^"]*)" mode$')
def videos_are_rendered(_step, mode):
modes = {
'html5': 'video',
'youtube': 'iframe',
'flash': 'iframe',
}
html_tag = modes[mode.lower()]
actual = len(world.css_find('.video {0}'.format(html_tag)))
expected = len(world.css_find('.xmodule_VideoModule'))
assert actual == expected
@step('all sources are correct$')
def all_sources_are_correct(_step):
elements = world.css_find('.video-player video source')
sources = [source['src'].split('?')[0] for source in elements]
assert set(sources) == set(HTML5_SOURCES)
@step('error message is shown$')
def error_message_is_shown(_step):
selector = '.video .video-player h3'
assert world.css_visible(selector)
@step('error message has correct text$')
def error_message_has_correct_text(_step):
selector = '.video .video-player h3'
text = _('ERROR: No playable video sources found!')
assert world.css_has_text(selector, text)
@step('I make sure captions are (.+)$')
def set_captions_visibility_state(_step, captions_state):
SELECTOR = '.closed .subtitles'
if world.is_css_not_present(SELECTOR):
if captions_state == 'closed':
world.css_click('.hide-subtitles')
else:
if captions_state != 'closed':
world.css_click('.hide-subtitles')
@step('I see video menu "([^"]*)" with correct items$')
def i_see_menu(_step, menu):
open_menu(menu)
menu_items = world.css_find(VIDEO_MENUS[menu] + ' li')
video = world.scenario_dict['VIDEO']
transcripts = dict(video.transcripts)
if video.sub:
transcripts.update({
'en': video.sub
})
languages = {i[0]: i[1] for i in LANGUAGES}
transcripts = {k: languages[k] for k in transcripts}
for code, label in transcripts.items():
assert any([i.text == label for i in menu_items])
assert any([i['data-lang-code'] == code for i in menu_items])
@step('I see "([^"]*)" text in the captions$')
def check_text_in_the_captions(_step, text):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'))
actual_text = world.css_text('.subtitles')
assert text in actual_text
@step('I see text in the captions:')
def check_captions(_step):
world.wait_for_present('.video.is-captions-rendered')
for index, video in enumerate(_step.hashes):
assert video.get('text') in world.css_text('.subtitles', index=index)
@step('I select language with code "([^"]*)"$')
def select_language(_step, code):
world.wait_for_visible('.video-controls')
# Make sure that all ajax requests that affect the language menu are finished.
# For example, a request to get a new translation etc.
world.wait_for_ajax_complete()
selector = VIDEO_MENUS["language"] + ' li[data-lang-code="{code}"]'.format(
code=code
)
world.css_find(VIDEO_BUTTONS["CC"])[0].mouse_over()
world.wait_for_present('.lang.open')
world.css_click(selector)
assert world.css_has_class(selector, 'is-active')
assert len(world.css_find(VIDEO_MENUS["language"] + ' li.is-active')) == 1
# Make sure that all ajax requests that affect the display of captions are finished.
# For example, a request to get a new translation etc.
world.wait_for_ajax_complete()
world.wait_for_visible('.subtitles')
world.wait_for_present('.video.is-captions-rendered')
@step('I click video button "([^"]*)"$')
def click_button(_step, button):
world.css_click(VIDEO_BUTTONS[button])
if button == "play":
# Need to wait for video buffering
world.wait_for(
func=lambda _: world.css_has_class('.video', 'is-playing') and world.is_css_present(VIDEO_BUTTONS['pause']),
timeout=30
)
world.wait_for_ajax_complete()
@step('I see video slider at "([^"]*)" position$')
def start_playing_video_from_n_seconds(_step, time_str):
position = parse_time_str(time_str)
actual_position = elapsed_time()
assert_equal(actual_position, int(position), "Current position is {}, but should be {}".format(actual_position, position))
@step('I see duration "([^"]*)"$')
def i_see_duration(_step, position):
world.wait_for(
func=lambda _: duration() > 0,
timeout=30
)
assert duration() == parse_time_str(position)
@step('I wait for video controls appear$')
def controls_appear(_step):
world.wait_for_visible('.video-controls')
@step('I seek video to "([^"]*)" position$')
def seek_video_to_n_seconds(_step, time_str):
time = parse_time_str(time_str)
jsCode = "$('.video').data('video-player-state').videoPlayer.onSlideSeek({{time: {0}}})".format(time)
world.browser.execute_script(jsCode)
world.wait_for(
func=lambda _: world.retry_on_exception(lambda: elapsed_time() == time and not world.css_has_class('.video', 'is-buffering')),
timeout=30
)
_step.given('I see video slider at "{0}" position'.format(time_str))
@step('I have a "([^"]*)" transcript file in assets$')
def upload_to_assets(_step, filename):
upload_file(filename, world.scenario_dict['COURSE'].location)
@step('menu "([^"]*)" doesn\'t exist$')
def is_hidden_menu(_step, menu):
assert world.is_css_not_present(VIDEO_MENUS[menu])
@step('I see video aligned correctly (with(?:out)?) enabled transcript$')
def video_alignment(_step, transcript_visibility):
# Width of the video container in css equals 75% of the window if the transcript is enabled
wrapper_width = 75 if transcript_visibility == "with" else 100
initial = get_window_dimensions()
set_window_dimensions(300, 600)
real, expected = get_all_dimensions()
width = round(100 * real['width'] / expected['width']) == wrapper_width
set_window_dimensions(600, 300)
real, expected = get_all_dimensions()
height = abs(expected['height'] - real['height']) <= 5
# Restore initial window size
set_window_dimensions(initial['width'], initial['height'])
assert all([width, height])
@step('I can download transcript in "([^"]*)" format that has text "([^"]*)"$')
def i_can_download_transcript(_step, format, text):
assert world.css_has_text('.video-tracks .a11y-menu-button', '.' + format, strip=True)
formats = {
'srt': 'application/x-subrip',
'txt': 'text/plain',
}
url = world.css_find(VIDEO_BUTTONS['download_transcript'])[0]['href']
request = RequestHandlerWithSessionId()
assert request.get(url).is_success()
assert request.check_header('content-type', formats[format])
assert text.encode('utf-8') in request.content
@step('I select the transcript format "([^"]*)"$')
def select_transcript_format(_step, format):
button_selector = '.video-tracks .a11y-menu-button'
menu_selector = VIDEO_MENUS['download_transcript']
button = world.css_find(button_selector).first
height = button._element.location_once_scrolled_into_view['y']
world.browser.driver.execute_script("window.scrollTo(0, {});".format(height))
button.mouse_over()
assert world.css_has_text(button_selector, '...', strip=True)
menu_items = world.css_find(menu_selector + ' a')
for item in menu_items:
if item['data-value'] == format:
item.click()
world.wait_for_ajax_complete()
break
world.browser.driver.execute_script("window.scrollTo(0, 0);")
assert world.css_find(menu_selector + ' .active a')[0]['data-value'] == format
assert world.css_has_text(button_selector, '.' + format, strip=True)
@step('video (.*) show the captions$')
def shows_captions(_step, show_captions):
if 'not' in show_captions or 'n\'t' in show_captions:
assert world.is_css_present('div.video.closed')
else:
assert world.is_css_not_present('div.video.closed')
@step('I click on caption line "([^"]*)", video module shows elapsed time "([^"]*)"$')
def click_on_the_caption(_step, index, expected_time):
world.wait_for_present('.video.is-captions-rendered')
find_caption_line_by_data_index(int(index)).click()
actual_time = elapsed_time()
assert int(expected_time) == actual_time
@step('button "([^"]*)" is (hidden|visible)$')
def is_hidden_button(_step, button, state):
selector = VIDEO_BUTTONS[button]
if state == 'hidden':
world.wait_for_invisible(selector)
assert_false(
world.css_visible(selector),
'Button {0} is invisible, but should be visible'.format(button)
)
else:
world.wait_for_visible(selector)
assert_true(
world.css_visible(selector),
'Button {0} is visible, but should be invisible'.format(button)
)
@step('button "([^"]*)" is (active|inactive)$')
def i_see_active_button(_step, button, state):
selector = VIDEO_BUTTONS[button]
if state == 'active':
assert world.css_has_class(selector, 'active')
else:
assert not world.css_has_class(selector, 'active')
| agpl-3.0 |
thombashi/pytablewriter | test/writer/text/test_html_writer.py | 1 | 14391 | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from textwrap import dedent
import pytest
import pytablewriter
from pytablewriter.style import Style
from ..._common import print_test_result
from ...data import (
Data,
headers,
mix_header_list,
mix_value_matrix,
null_test_data_list,
value_matrix,
value_matrix_with_none,
vut_style_tabledata,
vut_styles,
)
normal_test_data_list = [
Data(
table="",
indent=" ",
header=headers,
value=value_matrix,
expected="""<table>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">1</td>
<td align="right">123.1</td>
<td align="left">a</td>
<td align="right">1.0</td>
<td align="right">1</td>
</tr>
<tr>
<td align="right">2</td>
<td align="right">2.2</td>
<td align="left">bb</td>
<td align="right">2.2</td>
<td align="right">2.2</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.3</td>
<td align="left">ccc</td>
<td align="right">3.0</td>
<td align="left">cccc</td>
</tr>
</tbody>
</table>
""",
),
Data(
table=None,
indent=" ",
header=None,
value=value_matrix,
expected="""<table>
<tbody>
<tr>
<td align="right">1</td>
<td align="right">123.1</td>
<td align="left">a</td>
<td align="right">1.0</td>
<td align="right">1</td>
</tr>
<tr>
<td align="right">2</td>
<td align="right">2.2</td>
<td align="left">bb</td>
<td align="right">2.2</td>
<td align="right">2.2</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.3</td>
<td align="left">ccc</td>
<td align="right">3.0</td>
<td align="left">cccc</td>
</tr>
</tbody>
</table>
""",
),
Data(
table="tablename",
indent=" ",
header=headers,
value=[],
expected="""<table id="tablename">
<caption>tablename</caption>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody></tbody>
</table>
""",
),
Data(
table=None,
indent=" ",
header=headers,
value=None,
expected="""<table>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody></tbody>
</table>
""",
),
Data(
table="",
indent=" ",
header=headers,
value=value_matrix_with_none,
expected="""<table>
<thead>
<tr>
<th>a</th>
<th>b</th>
<th>c</th>
<th>dd</th>
<th>e</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">1</td>
<td align="right"></td>
<td align="left">a</td>
<td align="right">1.0</td>
<td align="left"></td>
</tr>
<tr>
<td align="right"></td>
<td align="right">2.2</td>
<td align="left"></td>
<td align="right">2.2</td>
<td align="right">2.2</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.3</td>
<td align="left">ccc</td>
<td align="right"></td>
<td align="left">cccc</td>
</tr>
<tr>
<td align="right"></td>
<td align="right"></td>
<td align="left"></td>
<td align="right"></td>
<td align="left"></td>
</tr>
</tbody>
</table>
""",
),
Data(
table="tablename",
indent=" ",
header=mix_header_list,
value=mix_value_matrix,
expected="""<table id="tablename">
<caption>tablename</caption>
<thead>
<tr>
<th>i</th>
<th>f</th>
<th>c</th>
<th>if</th>
<th>ifc</th>
<th>bool</th>
<th>inf</th>
<th>nan</th>
<th>mix_num</th>
<th>time</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">1</td>
<td align="right">1.10</td>
<td align="left">aa</td>
<td align="right">1.0</td>
<td align="right">1</td>
<td align="left">True</td>
<td align="left">Infinity</td>
<td align="left">NaN</td>
<td align="right">1</td>
<td align="left">2017-01-01T00:00:00</td>
</tr>
<tr>
<td align="right">2</td>
<td align="right">2.20</td>
<td align="left">bbb</td>
<td align="right">2.2</td>
<td align="right">2.2</td>
<td align="left">False</td>
<td align="left">Infinity</td>
<td align="left">NaN</td>
<td align="right">Infinity</td>
<td align="left">2017-01-02 03:04:05+09:00</td>
</tr>
<tr>
<td align="right">3</td>
<td align="right">3.33</td>
<td align="left">cccc</td>
<td align="right">-3.0</td>
<td align="left">ccc</td>
<td align="left">True</td>
<td align="left">Infinity</td>
<td align="left">NaN</td>
<td align="right">NaN</td>
<td align="left">2017-01-01T00:00:00</td>
</tr>
</tbody>
</table>
""",
),
]
table_writer_class = pytablewriter.HtmlTableWriter
class Test_HtmlTableWriter_write_new_line:
def test_normal(self, capsys):
writer = table_writer_class()
writer.write_null_line()
out, _err = capsys.readouterr()
assert out == "\n"
class Test_HtmlTableWriter_write_table:
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in normal_test_data_list
],
)
def test_normal(self, capsys, table, indent, header, value, expected):
writer = table_writer_class(
table_name=table, indent_string=indent, headers=header, value_matrix=value
)
writer.write_table()
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
assert writer.dumps() == expected
assert str(writer) == expected
def test_normal_styles(self, capsys):
writer = table_writer_class(column_styles=vut_styles)
writer.from_tabledata(vut_style_tabledata)
writer.write_table()
expected = dedent(
"""\
<table id="styletest">
<caption>style test</caption>
<thead>
<tr>
<th>none</th>
<th>empty</th>
<th>tiny</th>
<th>small</th>
<th>medium</th>
<th>large</th>
<th>null w/ bold</th>
<th>L bold</th>
<th>S italic</th>
<th>L bold italic</th>
</tr>
</thead>
<tbody>
<tr>
<td align="right">111</td>
<td align="right">111</td>
<td align="right" style="font-size:x-small">111</td>
<td align="right" style="font-size:small">111</td>
<td align="right" style="font-size:medium">111</td>
<td align="right" style="font-size:large">111</td>
<td align="left" style="font-weight:bold"></td>
<td align="right" style="font-size:large; font-weight:bold">111</td>
<td align="right" style="font-size:small; font-style:italic">111</td>
<td align="right" style="font-size:large; font-weight:bold; font-style:italic">111</td>
</tr>
<tr>
<td align="right">1234</td>
<td align="right">1234</td>
<td align="right" style="font-size:x-small">1234</td>
<td align="right" style="font-size:small">1234</td>
<td align="right" style="font-size:medium">1,234</td>
<td align="right" style="font-size:large">1 234</td>
<td align="left" style="font-weight:bold"></td>
<td align="right" style="font-size:large; font-weight:bold">1234</td>
<td align="right" style="font-size:small; font-style:italic">1234</td>
<td align="right" style="font-size:large; font-weight:bold; font-style:italic">1234</td>
</tr>
</tbody>
</table>
"""
)
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
print("----- _repr_html_ -----")
out = writer._repr_html_()
print_test_result(expected=expected, actual=out)
assert out == expected
def test_normal_valign(self, capsys):
writer = table_writer_class(
table_name="vertical-align",
headers=[
"",
"top",
"middle",
"bottom",
"top-right",
"middle-right",
"bottom-right",
],
value_matrix=[
["te\nst", "x", "x", "x", "x", "x", "x"],
],
column_styles=[
Style(vertical_align="baseline"),
Style(vertical_align="top"),
Style(vertical_align="middle"),
Style(vertical_align="bottom"),
Style(align="right", vertical_align="top"),
Style(align="right", vertical_align="middle"),
Style(align="right", vertical_align="bottom"),
],
)
writer.write_table()
expected = """\
<table id="verticalalign">
<caption>vertical-align</caption>
<thead>
<tr>
<th></th>
<th>top</th>
<th>middle</th>
<th>bottom</th>
<th>top-right</th>
<th>middle-right</th>
<th>bottom-right</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">te<br>st</td>
<td align="left" valign="top">x</td>
<td align="left" valign="middle">x</td>
<td align="left" valign="bottom">x</td>
<td align="right" valign="top">x</td>
<td align="right" valign="middle">x</td>
<td align="right" valign="bottom">x</td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_line_breaks(self, capsys):
writer = table_writer_class(
table_name="line breaks",
headers=["a\nb", "\nc\n\nd\n", "e\r\nf"],
value_matrix=[["v1\nv1", "v2\n\nv2", "v3\r\nv3"]],
)
writer.write_table()
expected = """\
<table id="linebreaks">
<caption>line breaks</caption>
<thead>
<tr>
<th>a<br>b</th>
<th><br>c<br><br>d<br></th>
<th>e<br>f</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">v1<br>v1</td>
<td align="left">v2<br><br>v2</td>
<td align="left">v3<br>v3</td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_none_values(self, capsys):
writer = table_writer_class()
writer.table_name = "none value"
writer.headers = ["none"]
writer.value_matrix = [[None]]
writer.write_table()
expected = """\
<table id="nonevalue">
<caption>none value</caption>
<thead>
<tr>
<th>none</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left"></td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
def test_normal_write_css(self, capsys):
writer = table_writer_class()
writer.table_name = "Write HTML with CSS"
writer.headers = ["int"]
writer.value_matrix = [[1]]
writer.write_table(write_css=True)
expected = """\
<style type="text/css">
.Write-HTML-with-CSS-css thead th:nth-child(1) {
text-align: left;
}
.Write-HTML-with-CSS-css tbody tr:nth-child(1) td:nth-child(1) {
text-align: right;
}
</style>
<table class="Write-HTML-with-CSS-css" id="WriteHTMLwithCSS">
<caption>Write HTML with CSS</caption>
<thead>
<tr>
<th>int</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
</tr>
</tbody>
</table>
"""
out, err = capsys.readouterr()
print_test_result(expected=expected, actual=out, error=err)
assert out == expected
@pytest.mark.parametrize(
["table", "indent", "header", "value", "expected"],
[
[data.table, data.indent, data.header, data.value, data.expected]
for data in null_test_data_list
],
)
def test_normal_empty(self, table, indent, header, value, expected):
writer = table_writer_class()
writer.table_name = table
writer.indent_string = indent
writer.headers = header
writer.value_matrix = value
assert writer.dumps() == ""
class Test_HtmlTableWriter_write_table_iter:
def test_exception(self):
writer = table_writer_class()
with pytest.raises(pytablewriter.NotSupportedError):
writer.write_table_iter()
| mit |
gagneurlab/concise | concise/utils/splines.py | 2 | 6437 | # get the splines
import numpy as np
import scipy.interpolate as si
# TODO - BSpline.predict() -> allow x to be of any shape. return.shape = in.shape + (n_bases)
# MAYBE TODO - implement si.splev using keras.backend.
# - That way you don't have to hash the X_spline in memory.
class BSpline():
"""Class for computing the B-spline funcions b_i(x) and
constructing the penality matrix S.
# Arguments
start: float or int; start of the region
end: float or int; end of the region
n_bases: int; number of spline bases
spline_order: int; spline order
# Methods
- **getS(add_intercept=False)** - Get the penalty matrix S
- Arguments
- **add_intercept**: bool. If true, intercept column is added to the returned matrix.
- Returns
- `np.array`, of shape `(n_bases + add_intercept, n_bases + add_intercept)`
- **predict(x, add_intercept=False)** - For some x, predict the bn(x) for each base
- Arguments
- **x**: np.array; Vector of dimension 1
- **add_intercept**: bool; If True, an intercept column is added to the final array
- Returns
- `np.array`, of shape `(len(x), n_bases + (add_intercept))`
"""
def __init__(self, start=0, end=1, n_bases=10, spline_order=3):
self.start = start
self.end = end
self.n_bases = n_bases
self.spline_order = spline_order
self.knots = get_knots(self.start, self.end, self.n_bases, self.spline_order)
self.S = get_S(self.n_bases, self.spline_order, add_intercept=False)
def __repr__(self):
return "BSpline(start={0}, end={1}, n_bases={2}, spline_order={3})".\
format(self.start, self.end, self.n_bases, self.spline_order)
def getS(self, add_intercept=False):
"""Get the penalty matrix S
Returns
np.array, of shape (n_bases + add_intercept, n_bases + add_intercept)
"""
S = self.S
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S
def predict(self, x, add_intercept=False):
"""For some x, predict the bn(x) for each base
Arguments:
x: np.array; Vector of dimension 1
add_intercept: bool; should we add the intercept to the final array
Returns:
np.array, of shape (len(x), n_bases + (add_intercept))
"""
# sanity check
if x.min() < self.start:
raise Warning("x.min() < self.start")
if x.max() > self.end:
raise Warning("x.max() > self.end")
return get_X_spline(x=x,
knots=self.knots,
n_bases=self.n_bases,
spline_order=self.spline_order,
add_intercept=add_intercept)
def get_config(self):
return {"start": self.start,
"end": self.end,
"n_bases": self.n_bases,
"spline_order": self.spline_order
}
@classmethod
def from_config(cls, config):
return cls(**config)
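# Minimal usage sketch (hypothetical numbers):
#
#   bs = BSpline(start=0, end=100, n_bases=10)
#   X = bs.predict(np.linspace(0, 100, 50))  # shape (50, 10)
#   S = bs.getS(add_intercept=False)         # shape (10, 10)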
############################################
# core functions
def get_gam_splines(start=0, end=100, n_bases=10, spline_order=3, add_intercept=True):
"""Main function required by (TF)Concise class
"""
# make sure n_bases is an int
assert type(n_bases) == int
x = np.arange(start, end + 1)
knots = get_knots(start, end, n_bases, spline_order)
X_splines = get_X_spline(x, knots, n_bases, spline_order, add_intercept)
S = get_S(n_bases, spline_order, add_intercept)
# Get the same knot positions as with mgcv
# https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
return X_splines, S, knots
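# e.g. X, S, knots = get_gam_splines(0, 100, n_bases=10) yields
# X of shape (101, 11) and S of shape (11, 11) (intercept column included).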
############################################
# helper functions
# main resource:
# https://github.com/cran/mgcv/blob/master/R/smooth.r#L1560
def get_knots(start, end, n_bases=10, spline_order=3):
"""
Arguments:
start, end: float or int; boundaries of the region the splines cover
"""
x_range = end - start
start = start - x_range * 0.001
end = end + x_range * 0.001
# mgcv annotation
m = spline_order - 1
nk = n_bases - m # number of interior knots
dknots = (end - start) / (nk - 1)
knots = np.linspace(start=start - dknots * (m + 1),
stop=end + dknots * (m + 1),
num=nk + 2 * m + 2)
return knots.astype(np.float32)
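# e.g. get_knots(0, 10, n_bases=5, spline_order=3): m = 2, nk = 3, so the
# result holds nk + 2*m + 2 = 9 equally spaced knots, extending m + 1 = 3
# spacings beyond each (slightly widened) end of the interval.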
# - get knots as arguments
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
"""
Returns:
np.array of shape [len(x), n_bases + (add_intercept)]
# BSpline formula
https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
Fortran code:
https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
"""
if len(x.shape) != 1:
raise ValueError("x has to be 1 dimensional")
tck = [knots, np.zeros(n_bases), spline_order]
X = np.zeros([len(x), n_bases])
for i in range(n_bases):
vec = np.zeros(n_bases)
vec[i] = 1.0
tck[1] = vec
X[:, i] = si.splev(x, tck, der=0)
if add_intercept is True:
ones = np.ones_like(X[:, :1])
X = np.hstack([ones, X])
return X.astype(np.float32)
def get_S(n_bases=10, spline_order=3, add_intercept=True):
# mvcv R-code
# S<-diag(object$bs.dim);
# if (m[2]) for (i in 1:m[2]) S <- diff(S)
# object$S <- list(t(S)%*%S) # get penalty
# object$S[[1]] <- (object$S[[1]]+t(object$S[[1]]))/2 # exact symmetry
S = np.identity(n_bases)
m2 = spline_order - 1 # m[2] is the same as m[1] by default
# m2 order differences
for i in range(m2):
S = np.diff(S, axis=0) # same as diff() in R
S = np.dot(S.T, S)
S = (S + S.T) / 2 # exact symmetry
if add_intercept is True:
# S <- cbind(0, rbind(0, S)) # in R
zeros = np.zeros_like(S[:1, :])
S = np.vstack([zeros, S])
zeros = np.zeros_like(S[:, :1])
S = np.hstack([zeros, S])
return S.astype(np.float32)
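# e.g. for n_bases=4, spline_order=3 (no intercept) the two diff() passes
# reduce I_4 to D = [[1, -2, 1, 0], [0, 1, -2, 1]], so S = D^T D is the
# familiar second-order difference penalty.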
| mit |
nmayorov/scikit-learn | examples/plot_multilabel.py | 236 | 4157 | # Authors: Vlad Niculae, Mathieu Blondel
# License: BSD 3 clause
"""
=========================
Multilabel classification
=========================
This example simulates a multi-label document classification problem. The
dataset is generated randomly based on the following process:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that n is never
more than 2, and that the document length is never zero. Likewise, we reject classes
which have already been chosen. The documents that are assigned to both
classes are plotted surrounded by two colored circles.
The classification is performed by projecting to the first two principal
components found by PCA and CCA for visualisation purposes, followed by using
the :class:`sklearn.multiclass.OneVsRestClassifier` metaclassifier using two
SVCs with linear kernels to learn a discriminative model for each class.
Note that PCA is used to perform an unsupervised dimensionality reduction,
while CCA is used to perform a supervised one.
Note: in the plot, "unlabeled samples" does not mean that we don't know the
labels (as in semi-supervised learning) but that the samples simply do *not*
have a label.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.preprocessing import LabelBinarizer
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import CCA
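# A hedged sketch (not used by the example below) of the generative process
# described in the docstring, written with plain numpy; `theta`, `theta_c`,
# `n_labels` and `length` are illustrative assumptions, not sklearn API.
def sample_document(rng, theta, theta_c, n_labels=1, length=50):
    # pick the number of labels: n ~ Poisson(n_labels), rejected while n > 2
    n = rng.poisson(n_labels)
    while n > 2:
        n = rng.poisson(n_labels)
    # n times, choose a class c ~ Multinomial(theta), rejecting repeats
    classes = []
    while len(classes) < n:
        c = rng.choice(len(theta), p=theta)
        if c not in classes:
            classes.append(c)
    # pick the document length: k ~ Poisson(length), rejected while k == 0
    k = rng.poisson(length)
    while k == 0:
        k = rng.poisson(length)
    # k times, choose a word w ~ Multinomial(theta_c) of a chosen class; for
    # an unlabeled document we fall back to theta itself (an assumption)
    p = theta_c[classes[0]] if classes else theta
    words = rng.choice(len(p), size=k, p=p)
    return classes, words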
def plot_hyperplane(clf, min_x, max_x, linestyle, label):
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(min_x - 5, max_x + 5) # make sure the line is long enough
yy = a * xx - (clf.intercept_[0]) / w[1]
plt.plot(xx, yy, linestyle, label=label)
def plot_subfigure(X, Y, subplot, title, transform):
if transform == "pca":
X = PCA(n_components=2).fit_transform(X)
elif transform == "cca":
X = CCA(n_components=2).fit(X, Y).transform(X)
else:
        raise ValueError("transform must be 'pca' or 'cca'")
min_x = np.min(X[:, 0])
max_x = np.max(X[:, 0])
min_y = np.min(X[:, 1])
max_y = np.max(X[:, 1])
classif = OneVsRestClassifier(SVC(kernel='linear'))
classif.fit(X, Y)
plt.subplot(2, 2, subplot)
plt.title(title)
zero_class = np.where(Y[:, 0])
one_class = np.where(Y[:, 1])
plt.scatter(X[:, 0], X[:, 1], s=40, c='gray')
plt.scatter(X[zero_class, 0], X[zero_class, 1], s=160, edgecolors='b',
facecolors='none', linewidths=2, label='Class 1')
plt.scatter(X[one_class, 0], X[one_class, 1], s=80, edgecolors='orange',
facecolors='none', linewidths=2, label='Class 2')
plot_hyperplane(classif.estimators_[0], min_x, max_x, 'k--',
'Boundary\nfor class 1')
plot_hyperplane(classif.estimators_[1], min_x, max_x, 'k-.',
'Boundary\nfor class 2')
plt.xticks(())
plt.yticks(())
plt.xlim(min_x - .5 * max_x, max_x + .5 * max_x)
plt.ylim(min_y - .5 * max_y, max_y + .5 * max_y)
if subplot == 2:
plt.xlabel('First principal component')
plt.ylabel('Second principal component')
plt.legend(loc="upper left")
plt.figure(figsize=(8, 6))
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=True,
random_state=1)
plot_subfigure(X, Y, 1, "With unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 2, "With unlabeled samples + PCA", "pca")
X, Y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
random_state=1)
plot_subfigure(X, Y, 3, "Without unlabeled samples + CCA", "cca")
plot_subfigure(X, Y, 4, "Without unlabeled samples + PCA", "pca")
plt.subplots_adjust(.04, .02, .97, .94, .09, .2)
plt.show()
| bsd-3-clause |
mapr/hue | desktop/core/ext-py/MySQL-python-1.2.5/_mysql_exceptions.py | 99 | 2352 | """_mysql_exceptions: Exception classes for _mysql and MySQLdb.
These classes are dictated by the DB API v2.0:
http://www.python.org/topics/database/DatabaseAPI-2.0.html
"""
try:
from exceptions import Exception, StandardError, Warning
except ImportError:
# Python 3
StandardError = Exception
class MySQLError(StandardError):
"""Exception related to operation with MySQL."""
class Warning(Warning, MySQLError):
"""Exception raised for important warnings like data truncations
while inserting, etc."""
class Error(MySQLError):
"""Exception that is the base class of all other error exceptions
(not Warning)."""
class InterfaceError(Error):
"""Exception raised for errors that are related to the database
interface rather than the database itself."""
class DatabaseError(Error):
"""Exception raised for errors that are related to the
database."""
class DataError(DatabaseError):
"""Exception raised for errors that are due to problems with the
processed data like division by zero, numeric value out of range,
etc."""
class OperationalError(DatabaseError):
"""Exception raised for errors that are related to the database's
operation and not necessarily under the control of the programmer,
e.g. an unexpected disconnect occurs, the data source name is not
found, a transaction could not be processed, a memory allocation
error occurred during processing, etc."""
class IntegrityError(DatabaseError):
"""Exception raised when the relational integrity of the database
is affected, e.g. a foreign key check fails, duplicate key,
etc."""
class InternalError(DatabaseError):
"""Exception raised when the database encounters an internal
error, e.g. the cursor is not valid anymore, the transaction is
out of sync, etc."""
class ProgrammingError(DatabaseError):
"""Exception raised for programming errors, e.g. table not found
or already exists, syntax error in the SQL statement, wrong number
of parameters specified, etc."""
class NotSupportedError(DatabaseError):
"""Exception raised in case a method or database API was used
which is not supported by the database, e.g. requesting a
.rollback() on a connection that does not support transaction or
has transactions turned off."""
| apache-2.0 |
lckung/spark-ec2 | launch-script/lib/boto-2.34.0/tests/integration/ec2/autoscale/test_connection.py | 114 | 6560 | # Copyright (c) 2011 Reza Lotun http://reza.lotun.name
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Some unit tests for the AutoscaleConnection
"""
import time
from boto.ec2.autoscale import AutoScaleConnection
from boto.ec2.autoscale.activity import Activity
from boto.ec2.autoscale.group import AutoScalingGroup, ProcessType
from boto.ec2.autoscale.launchconfig import LaunchConfiguration
from boto.ec2.autoscale.policy import AdjustmentType, MetricCollectionTypes, ScalingPolicy
from boto.ec2.autoscale.scheduled import ScheduledUpdateGroupAction
from boto.ec2.autoscale.instance import Instance
from boto.ec2.autoscale.tag import Tag
from tests.compat import unittest
class AutoscaleConnectionTest(unittest.TestCase):
ec2 = True
autoscale = True
def test_basic(self):
# NB: as it says on the tin these are really basic tests that only
# (lightly) exercise read-only behaviour - and that's only if you
# have any autoscale groups to introspect. It's useful, however, to
# catch simple errors
print('--- running %s tests ---' % self.__class__.__name__)
c = AutoScaleConnection()
self.assertTrue(repr(c).startswith('AutoScaleConnection'))
groups = c.get_all_groups()
for group in groups:
self.assertIsInstance(group, AutoScalingGroup)
# get activities
activities = group.get_activities()
for activity in activities:
self.assertIsInstance(activity, Activity)
# get launch configs
configs = c.get_all_launch_configurations()
for config in configs:
self.assertIsInstance(config, LaunchConfiguration)
# get policies
policies = c.get_all_policies()
for policy in policies:
self.assertIsInstance(policy, ScalingPolicy)
# get scheduled actions
actions = c.get_all_scheduled_actions()
for action in actions:
self.assertIsInstance(action, ScheduledUpdateGroupAction)
# get instances
instances = c.get_all_autoscaling_instances()
for instance in instances:
self.assertIsInstance(instance, Instance)
# get all scaling process types
ptypes = c.get_all_scaling_process_types()
for ptype in ptypes:
            self.assertIsInstance(ptype, ProcessType)
# get adjustment types
adjustments = c.get_all_adjustment_types()
for adjustment in adjustments:
self.assertIsInstance(adjustment, AdjustmentType)
# get metrics collection types
types = c.get_all_metric_collection_types()
self.assertIsInstance(types, MetricCollectionTypes)
# create the simplest possible AutoScale group
# first create the launch configuration
time_string = '%d' % int(time.time())
lc_name = 'lc-%s' % time_string
lc = LaunchConfiguration(name=lc_name, image_id='ami-2272864b',
instance_type='t1.micro')
c.create_launch_configuration(lc)
found = False
lcs = c.get_all_launch_configurations()
for lc in lcs:
if lc.name == lc_name:
found = True
break
assert found
# now create autoscaling group
group_name = 'group-%s' % time_string
group = AutoScalingGroup(name=group_name, launch_config=lc,
availability_zones=['us-east-1a'],
min_size=1, max_size=1)
c.create_auto_scaling_group(group)
found = False
groups = c.get_all_groups()
for group in groups:
if group.name == group_name:
found = True
break
assert found
# now create a tag
tag = Tag(key='foo', value='bar', resource_id=group_name,
propagate_at_launch=True)
c.create_or_update_tags([tag])
found = False
tags = c.get_all_tags()
for tag in tags:
if tag.resource_id == group_name and tag.key == 'foo':
found = True
break
assert found
c.delete_tags([tag])
# shutdown instances and wait for them to disappear
group.shutdown_instances()
instances = True
while instances:
time.sleep(5)
groups = c.get_all_groups()
for group in groups:
if group.name == group_name:
if not group.instances:
instances = False
group.delete()
lc.delete()
found = True
while found:
found = False
time.sleep(5)
tags = c.get_all_tags()
for tag in tags:
if tag.resource_id == group_name and tag.key == 'foo':
found = True
assert not found
print('--- tests completed ---')
def test_ebs_optimized_regression(self):
c = AutoScaleConnection()
time_string = '%d' % int(time.time())
lc_name = 'lc-%s' % time_string
lc = LaunchConfiguration(
name=lc_name,
image_id='ami-2272864b',
instance_type='t1.micro',
ebs_optimized=True
)
# This failed due to the difference between native Python ``True/False``
# & the expected string variants.
c.create_launch_configuration(lc)
self.addCleanup(c.delete_launch_configuration, lc_name)
| apache-2.0 |
flavoso/gerencex | gerencex/core/tests/test_view_office_tickets.py | 1 | 1342 | from django.contrib.auth.models import User
from django.shortcuts import resolve_url as r
from django.test import TestCase
from gerencex.core.models import Office
class OfficeTicketsViewTest(TestCase):
def setUp(self):
self.office = Office.objects.create(
name='Terceira Diacomp',
initials='DIACOMP3'
)
User.objects.create_user('testuser', 'test@user.com', 'senha123')
self.user = User.objects.get(username='testuser')
self.user.first_name = 'Ze'
self.user.last_name = 'Mane'
self.user.userdetail.office = self.office
self.user.save()
self.client.login(username='testuser', password='senha123')
self.resp = self.client.get(r('office_tickets'))
def test_get(self):
"""GET must return status code 200"""
self.assertEqual(200, self.resp.status_code)
def test_template(self):
"""Must use restdays.html"""
self.assertTemplateUsed(self.resp, 'office_tickets.html')
def test_html(self):
# print(self.resp.content)
contents = [
'Terceira Diacomp',
'Ze Mane'
]
for expected in contents:
with self.subTest():
self.assertContains(self.resp, expected)
| gpl-3.0 |
teslaji/homebase | venv/HomeBase/lib/python3.5/site-packages/pip/index.py | 237 | 47847 | """Routines related to PyPI, indexes"""
from __future__ import absolute_import
import logging
import cgi
from collections import namedtuple
import itertools
import sys
import os
import re
import mimetypes
import posixpath
import warnings
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._vendor.six.moves.urllib import request as urllib_request
from pip.compat import ipaddress
from pip.utils import (
Inf, cached_property, normalize_name, splitext, normalize_path,
ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS)
from pip.utils.deprecation import RemovedInPip8Warning
from pip.utils.logging import indent_log
from pip.exceptions import (
DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename,
UnsupportedWheel,
)
from pip.download import HAS_TLS, url_to_path, path_to_url
from pip.models import PyPI
from pip.wheel import Wheel, wheel_ext
from pip.pep425tags import supported_tags, supported_tags_noarch, get_platform
from pip._vendor import html5lib, requests, pkg_resources, six
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.requests.exceptions import SSLError
__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder']
# Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC)
SECURE_ORIGINS = [
# protocol, hostname, port
("https", "*", "*"),
("*", "localhost", "*"),
("*", "127.0.0.0/8", "*"),
("*", "::1/128", "*"),
("file", "*", None),
]
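# Illustrative reading of the table above (the tuples are assumptions about
# typical inputs, not part of pip's API): ("https", "pypi.python.org", 443)
# matches the first row and is accepted, while ("http", "mirror.example.com",
# 80) matches no row and triggers the warning in _validate_secure_origin below.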
logger = logging.getLogger(__name__)
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
class PackageFinder(object):
"""This finds packages.
This is meant to match easy_install's technique for looking for
packages, by reading pages and looking for appropriate links.
"""
def __init__(self, find_links, index_urls,
allow_external=(), allow_unverified=(),
allow_all_external=False, allow_all_prereleases=False,
trusted_hosts=None, process_dependency_links=False,
session=None, format_control=None):
"""Create a PackageFinder.
:param format_control: A FormatControl object or None. Used to control
the selection of source packages / binary packages when consulting
the index and links.
"""
if session is None:
raise TypeError(
"PackageFinder() missing 1 required keyword argument: "
"'session'"
)
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
self.find_links = []
for link in find_links:
if link.startswith('~'):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
self.find_links.append(link)
self.index_urls = index_urls
self.dependency_links = []
# These are boring links that have already been logged somehow:
self.logged_links = set()
self.format_control = format_control or FormatControl(set(), set())
# Do we allow (safe and verifiable) externally hosted files?
self.allow_external = set(normalize_name(n) for n in allow_external)
# Which names are allowed to install insecure and unverifiable files?
self.allow_unverified = set(
normalize_name(n) for n in allow_unverified
)
# Anything that is allowed unverified is also allowed external
self.allow_external |= self.allow_unverified
# Do we allow all (safe and verifiable) externally hosted files?
self.allow_all_external = allow_all_external
# Domains that we won't emit warnings for when not using HTTPS
self.secure_origins = [
("*", host, "*")
for host in (trusted_hosts if trusted_hosts else [])
]
# Stores if we ignored any external links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_external = False
# Stores if we ignored any unsafe links so that we can instruct
# end users how to install them if no distributions are available
self.need_warn_unverified = False
# Do we want to allow _all_ pre-releases?
self.allow_all_prereleases = allow_all_prereleases
# Do we process dependency links?
self.process_dependency_links = process_dependency_links
# The Session we'll use to make requests
self.session = session
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not HAS_TLS:
for link in itertools.chain(self.index_urls, self.find_links):
parsed = urllib_parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
def add_dependency_links(self, links):
# # FIXME: this shouldn't be global list this, it should only
# # apply to requirements of the package that specifies the
# # dependency_links value
# # FIXME: also, we should track comes_from (i.e., use Link)
if self.process_dependency_links:
warnings.warn(
"Dependency Links processing has been deprecated and will be "
"removed in a future release.",
RemovedInPip8Warning,
)
self.dependency_links.extend(links)
@staticmethod
def _sort_locations(locations, expand_dir=False):
"""
Sort locations into "files" (archives) and "urls", and return
a pair of lists (files,urls)
"""
files = []
urls = []
# puts the url for the given file path into the appropriate list
def sort_path(path):
url = path_to_url(path)
if mimetypes.guess_type(url, strict=False)[0] == 'text/html':
urls.append(url)
else:
files.append(url)
for url in locations:
is_local_path = os.path.exists(url)
is_file_url = url.startswith('file:')
if is_local_path or is_file_url:
if is_local_path:
path = url
else:
path = url_to_path(url)
if os.path.isdir(path):
if expand_dir:
path = os.path.realpath(path)
for item in os.listdir(path):
sort_path(os.path.join(path, item))
elif is_file_url:
urls.append(url)
elif os.path.isfile(path):
sort_path(path)
else:
urls.append(url)
return files, urls
def _candidate_sort_key(self, candidate):
"""
Function used to generate link sort key for link tuples.
The greater the return value, the more preferred it is.
If not finding wheels, then sorted by version only.
If finding wheels, then the sort order is by version, then:
1. existing installs
2. wheels ordered via Wheel.support_index_min()
3. source archives
Note: it was considered to embed this logic into the Link
comparison operators, but then different sdist links
with the same version, would have to be considered equal
"""
support_num = len(supported_tags)
if candidate.location == INSTALLED_VERSION:
pri = 1
elif candidate.location.is_wheel:
# can raise InvalidWheelFilename
wheel = Wheel(candidate.location.filename)
if not wheel.supported():
raise UnsupportedWheel(
"%s is not a supported wheel for this platform. It "
"can't be sorted." % wheel.filename
)
pri = -(wheel.support_index_min())
else: # sdist
pri = -(support_num)
return (candidate.version, pri)
def _sort_versions(self, applicable_versions):
"""
Bring the latest version (and wheels) to the front, but maintain the
existing ordering as secondary. See the docstring for `_link_sort_key`
for details. This function is isolated for easier unit testing.
"""
return sorted(
applicable_versions,
key=self._candidate_sort_key,
reverse=True
)
def _validate_secure_origin(self, logger, location):
# Determine if this url used a secure transport mechanism
parsed = urllib_parse.urlparse(str(location))
origin = (parsed.scheme, parsed.hostname, parsed.port)
# Determine if our origin is a secure origin by looking through our
# hardcoded list of secure origins, as well as any additional ones
# configured on this PackageFinder instance.
for secure_origin in (SECURE_ORIGINS + self.secure_origins):
# Check to see if the protocol matches
if origin[0] != secure_origin[0] and secure_origin[0] != "*":
continue
try:
# We need to do this decode dance to ensure that we have a
# unicode object, even on Python 2.x.
addr = ipaddress.ip_address(
origin[1]
if (
isinstance(origin[1], six.text_type) or
origin[1] is None
)
else origin[1].decode("utf8")
)
network = ipaddress.ip_network(
secure_origin[1]
if isinstance(secure_origin[1], six.text_type)
else secure_origin[1].decode("utf8")
)
except ValueError:
# We don't have both a valid address or a valid network, so
# we'll check this origin against hostnames.
if origin[1] != secure_origin[1] and secure_origin[1] != "*":
continue
else:
# We have a valid address and network, so see if the address
# is contained within the network.
if addr not in network:
continue
# Check to see if the port patches
if (origin[2] != secure_origin[2] and
secure_origin[2] != "*" and
secure_origin[2] is not None):
continue
# If we've gotten here, then this origin matches the current
# secure origin and we should return True
return True
# If we've gotten to this point, then the origin isn't secure and we
# will not accept it as a valid location to search. We will however
# log a warning that we are ignoring it.
logger.warning(
"The repository located at %s is not a trusted or secure host and "
"is being ignored. If this repository is available via HTTPS it "
"is recommended to use HTTPS instead, otherwise you may silence "
"this warning and allow it anyways with '--trusted-host %s'.",
parsed.hostname,
parsed.hostname,
)
return False
def _get_index_urls_locations(self, project_name):
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url):
loc = posixpath.join(url, project_url_name)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith('/'):
loc = loc + '/'
return loc
project_url_name = urllib_parse.quote(project_name.lower())
if self.index_urls:
# Check that we have the url_name correctly spelled:
# Only check main index if index URL is given
main_index_url = Link(
mkurl_pypi_url(self.index_urls[0]),
trusted=True,
)
page = self._get_page(main_index_url)
if page is None and PyPI.netloc not in str(main_index_url):
warnings.warn(
"Failed to find %r at %s. It is suggested to upgrade "
"your index to support normalized names as the name in "
"/simple/{name}." % (project_name, main_index_url),
RemovedInPip8Warning,
)
project_url_name = self._find_url_name(
Link(self.index_urls[0], trusted=True),
project_url_name,
) or project_url_name
if project_url_name is not None:
return [mkurl_pypi_url(url) for url in self.index_urls]
return []
def _find_all_versions(self, project_name):
"""Find all available versions for project_name
This checks index_urls, find_links and dependency_links
All versions found are returned
See _link_package_versions for details on which files are accepted
"""
index_locations = self._get_index_urls_locations(project_name)
index_file_loc, index_url_loc = self._sort_locations(index_locations)
fl_file_loc, fl_url_loc = self._sort_locations(
self.find_links, expand_dir=True)
dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links)
file_locations = (
Link(url) for url in itertools.chain(
index_file_loc, fl_file_loc, dep_file_loc)
)
# We trust every url that the user has given us whether it was given
# via --index-url or --find-links
# We explicitly do not trust links that came from dependency_links
# We want to filter out any thing which does not have a secure origin.
url_locations = [
link for link in itertools.chain(
(Link(url, trusted=True) for url in index_url_loc),
(Link(url, trusted=True) for url in fl_url_loc),
(Link(url) for url in dep_url_loc),
)
if self._validate_secure_origin(logger, link)
]
logger.debug('%d location(s) to search for versions of %s:',
len(url_locations), project_name)
for location in url_locations:
logger.debug('* %s', location)
canonical_name = pkg_resources.safe_name(project_name).lower()
formats = fmt_ctl_formats(self.format_control, canonical_name)
search = Search(project_name.lower(), canonical_name, formats)
find_links_versions = self._package_versions(
# We trust every directly linked archive in find_links
(Link(url, '-f', trusted=True) for url in self.find_links),
search
)
page_versions = []
for page in self._get_pages(url_locations, project_name):
logger.debug('Analyzing links from page %s', page.url)
with indent_log():
page_versions.extend(
self._package_versions(page.links, search)
)
dependency_versions = self._package_versions(
(Link(url) for url in self.dependency_links), search
)
if dependency_versions:
logger.debug(
'dependency_links found: %s',
', '.join([
version.location.url for version in dependency_versions
])
)
file_versions = self._package_versions(file_locations, search)
if file_versions:
file_versions.sort(reverse=True)
logger.debug(
'Local files found: %s',
', '.join([
url_to_path(candidate.location.url)
for candidate in file_versions
])
)
# This is an intentional priority ordering
return (
file_versions + find_links_versions + page_versions +
dependency_versions
)
def find_requirement(self, req, upgrade):
"""Try to find an InstallationCandidate for req
Expects req, an InstallRequirement and upgrade, a boolean
Returns an InstallationCandidate or None
May raise DistributionNotFound or BestVersionAlreadyInstalled
"""
all_versions = self._find_all_versions(req.name)
# Filter out anything which doesn't match our specifier
_versions = set(
req.specifier.filter(
# We turn the version object into a str here because otherwise
# when we're debundled but setuptools isn't, Python will see
# packaging.version.Version and
# pkg_resources._vendor.packaging.version.Version as different
# types. This way we'll use a str as a common data interchange
# format. If we stop using the pkg_resources provided specifier
# and start using our own, we can drop the cast to str().
[str(x.version) for x in all_versions],
prereleases=(
self.allow_all_prereleases
if self.allow_all_prereleases else None
),
)
)
applicable_versions = [
# Again, converting to str to deal with debundling.
x for x in all_versions if str(x.version) in _versions
]
if req.satisfied_by is not None:
# Finally add our existing versions to the front of our versions.
applicable_versions.insert(
0,
InstallationCandidate(
req.name,
req.satisfied_by.version,
INSTALLED_VERSION,
)
)
existing_applicable = True
else:
existing_applicable = False
applicable_versions = self._sort_versions(applicable_versions)
if not upgrade and existing_applicable:
if applicable_versions[0].location is INSTALLED_VERSION:
logger.debug(
'Existing installed version (%s) is most up-to-date and '
'satisfies requirement',
req.satisfied_by.version,
)
else:
logger.debug(
'Existing installed version (%s) satisfies requirement '
'(most up-to-date version is %s)',
req.satisfied_by.version,
                    applicable_versions[0].version,
)
return None
if not applicable_versions:
logger.critical(
'Could not find a version that satisfies the requirement %s '
'(from versions: %s)',
req,
', '.join(
sorted(
set(str(i.version) for i in all_versions),
key=parse_version,
)
)
)
if self.need_warn_external:
logger.warning(
"Some externally hosted files were ignored as access to "
"them may be unreliable (use --allow-external %s to "
"allow).",
req.name,
)
if self.need_warn_unverified:
logger.warning(
"Some insecure and unverifiable files were ignored"
" (use --allow-unverified %s to allow).",
req.name,
)
raise DistributionNotFound(
'No matching distribution found for %s' % req
)
if applicable_versions[0].location is INSTALLED_VERSION:
# We have an existing version, and its the best version
logger.debug(
'Installed version (%s) is most up-to-date (past versions: '
'%s)',
req.satisfied_by.version,
', '.join(str(i.version) for i in applicable_versions[1:]) or
"none",
)
raise BestVersionAlreadyInstalled
if len(applicable_versions) > 1:
logger.debug(
'Using version %s (newest of versions: %s)',
applicable_versions[0].version,
', '.join(str(i.version) for i in applicable_versions)
)
selected_version = applicable_versions[0].location
if (selected_version.verifiable is not None and not
selected_version.verifiable):
logger.warning(
"%s is potentially insecure and unverifiable.", req.name,
)
return selected_version
def _find_url_name(self, index_url, url_name):
"""
Finds the true URL name of a package, when the given name isn't quite
correct.
This is usually used to implement case-insensitivity.
"""
if not index_url.url.endswith('/'):
# Vaguely part of the PyPI API... weird but true.
# FIXME: bad to modify this?
index_url.url += '/'
page = self._get_page(index_url)
if page is None:
logger.critical('Cannot fetch index base URL %s', index_url)
return
norm_name = normalize_name(url_name)
for link in page.links:
base = posixpath.basename(link.path.rstrip('/'))
if norm_name == normalize_name(base):
logger.debug(
'Real name of requirement %s is %s', url_name, base,
)
return base
return None
def _get_pages(self, locations, project_name):
"""
Yields (page, page_url) from the given locations, skipping
locations that have errors, and adding download/homepage links
"""
all_locations = list(locations)
seen = set()
normalized = normalize_name(project_name)
while all_locations:
location = all_locations.pop(0)
if location in seen:
continue
seen.add(location)
page = self._get_page(location)
if page is None:
continue
yield page
for link in page.rel_links():
if (normalized not in self.allow_external and not
self.allow_all_external):
self.need_warn_external = True
logger.debug(
"Not searching %s for files because external "
"urls are disallowed.",
link,
)
continue
if (link.trusted is not None and not
link.trusted and
normalized not in self.allow_unverified):
logger.debug(
"Not searching %s for urls, it is an "
"untrusted link and cannot produce safe or "
"verifiable files.",
link,
)
self.need_warn_unverified = True
continue
all_locations.append(link)
_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')
def _sort_links(self, links):
"""
Returns elements of links in order, non-egg links first, egg links
second, while eliminating duplicates
"""
eggs, no_eggs = [], []
seen = set()
for link in links:
if link not in seen:
seen.add(link)
if link.egg_fragment:
eggs.append(link)
else:
no_eggs.append(link)
return no_eggs + eggs
def _package_versions(self, links, search):
result = []
for link in self._sort_links(links):
v = self._link_package_versions(link, search)
if v is not None:
result.append(v)
return result
def _log_skipped_link(self, link, reason):
if link not in self.logged_links:
logger.debug('Skipping link %s; %s', link, reason)
self.logged_links.add(link)
def _link_package_versions(self, link, search):
"""Return an InstallationCandidate or None"""
platform = get_platform()
version = None
if link.egg_fragment:
egg_info = link.egg_fragment
ext = link.ext
else:
egg_info, ext = link.splitext()
if not ext:
self._log_skipped_link(link, 'not a file')
return
if ext not in SUPPORTED_EXTENSIONS:
self._log_skipped_link(
link, 'unsupported archive format: %s' % ext)
return
if "binary" not in search.formats and ext == wheel_ext:
self._log_skipped_link(
link, 'No binaries permitted for %s' % search.supplied)
return
if "macosx10" in link.path and ext == '.zip':
self._log_skipped_link(link, 'macosx10 one')
return
if ext == wheel_ext:
try:
wheel = Wheel(link.filename)
except InvalidWheelFilename:
self._log_skipped_link(link, 'invalid wheel filename')
return
if (pkg_resources.safe_name(wheel.name).lower() !=
search.canonical):
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if not wheel.supported():
self._log_skipped_link(
link, 'it is not compatible with this Python')
return
# This is a dirty hack to prevent installing Binary Wheels from
# PyPI unless it is a Windows or Mac Binary Wheel. This is
# paired with a change to PyPI disabling uploads for the
# same. Once we have a mechanism for enabling support for
# binary wheels on linux that deals with the inherent problems
# of binary distribution this can be removed.
comes_from = getattr(link, "comes_from", None)
if (
(
not platform.startswith('win') and not
platform.startswith('macosx') and not
platform == 'cli'
) and
comes_from is not None and
urllib_parse.urlparse(
comes_from.url
).netloc.endswith(PyPI.netloc)):
if not wheel.supported(tags=supported_tags_noarch):
self._log_skipped_link(
link,
"it is a pypi-hosted binary "
"Wheel on an unsupported platform",
)
return
version = wheel.version
# This should be up by the search.ok_binary check, but see issue 2700.
if "source" not in search.formats and ext != wheel_ext:
self._log_skipped_link(
link, 'No sources permitted for %s' % search.supplied)
return
if not version:
version = egg_info_matches(egg_info, search.supplied, link)
if version is None:
self._log_skipped_link(
link, 'wrong project name (not %s)' % search.supplied)
return
if (link.internal is not None and not
link.internal and not
normalize_name(search.supplied).lower()
in self.allow_external and not
self.allow_all_external):
# We have a link that we are sure is external, so we should skip
# it unless we are allowing externals
self._log_skipped_link(link, 'it is externally hosted')
self.need_warn_external = True
return
if (link.verifiable is not None and not
link.verifiable and not
(normalize_name(search.supplied).lower()
in self.allow_unverified)):
# We have a link that we are sure we cannot verify its integrity,
# so we should skip it unless we are allowing unsafe installs
# for this requirement.
self._log_skipped_link(
link, 'it is an insecure and unverifiable file')
self.need_warn_unverified = True
return
match = self._py_version_re.search(version)
if match:
version = version[:match.start()]
py_version = match.group(1)
if py_version != sys.version[:3]:
self._log_skipped_link(
link, 'Python version is incorrect')
return
logger.debug('Found link %s, version: %s', link, version)
return InstallationCandidate(search.supplied, version, link)
def _get_page(self, link):
return HTMLPage.get_page(link, session=self.session)
def egg_info_matches(
egg_info, search_name, link,
_egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)):
"""Pull the version part out of a string.
:param egg_info: The string to parse. E.g. foo-2.1
:param search_name: The name of the package this belongs to. None to
infer the name. Note that this cannot unambiguously parse strings
like foo-2-2 which might be foo, 2-2 or foo-2, 2.
:param link: The link the string came from, for logging on failure.
"""
match = _egg_info_re.search(egg_info)
if not match:
logger.debug('Could not parse version from link: %s', link)
return None
if search_name is None:
full_match = match.group(0)
return full_match[full_match.index('-'):]
name = match.group(0).lower()
# To match the "safe" name that pkg_resources creates:
name = name.replace('_', '-')
# project name and version must be separated by a dash
look_for = search_name.lower() + "-"
if name.startswith(look_for):
return match.group(0)[len(look_for):]
else:
return None
class HTMLPage(object):
"""Represents one page, along with its URL"""
def __init__(self, content, url, headers=None, trusted=None):
# Determine if we have any encoding information in our headers
encoding = None
if headers and "Content-Type" in headers:
content_type, params = cgi.parse_header(headers["Content-Type"])
if "charset" in params:
encoding = params['charset']
self.content = content
self.parsed = html5lib.parse(
self.content,
encoding=encoding,
namespaceHTMLElements=False,
)
self.url = url
self.headers = headers
self.trusted = trusted
def __str__(self):
return self.url
@classmethod
def get_page(cls, link, skip_archives=True, session=None):
if session is None:
raise TypeError(
"get_page() missing 1 required keyword argument: 'session'"
)
url = link.url
url = url.split('#', 1)[0]
# Check for VCS schemes that do not support lookup as web pages.
from pip.vcs import VcsSupport
for scheme in VcsSupport.schemes:
if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
logger.debug('Cannot look at %s URL %s', scheme, link)
return None
try:
if skip_archives:
filename = link.filename
for bad_ext in ARCHIVE_EXTENSIONS:
if filename.endswith(bad_ext):
content_type = cls._get_content_type(
url, session=session,
)
if content_type.lower().startswith('text/html'):
break
else:
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
logger.debug('Getting page %s', url)
# Tack index.html onto file:// URLs that point to directories
(scheme, netloc, path, params, query, fragment) = \
urllib_parse.urlparse(url)
if (scheme == 'file' and
os.path.isdir(urllib_request.url2pathname(path))):
# add trailing slash if not present so urljoin doesn't trim
# final segment
if not url.endswith('/'):
url += '/'
url = urllib_parse.urljoin(url, 'index.html')
logger.debug(' file: URL is directory, getting %s', url)
resp = session.get(
url,
headers={
"Accept": "text/html",
"Cache-Control": "max-age=600",
},
)
resp.raise_for_status()
# The check for archives above only works if the url ends with
# something that looks like an archive. However that is not a
# requirement of an url. Unless we issue a HEAD request on every
# url we cannot know ahead of time for sure if something is HTML
# or not. However we can check after we've downloaded it.
content_type = resp.headers.get('Content-Type', 'unknown')
if not content_type.lower().startswith("text/html"):
logger.debug(
'Skipping page %s because of Content-Type: %s',
link,
content_type,
)
return
inst = cls(
resp.content, resp.url, resp.headers,
trusted=link.trusted,
)
except requests.HTTPError as exc:
level = 2 if exc.response.status_code == 404 else 1
cls._handle_fail(link, exc, url, level=level)
except requests.ConnectionError as exc:
cls._handle_fail(link, "connection error: %s" % exc, url)
except requests.Timeout:
cls._handle_fail(link, "timed out", url)
except SSLError as exc:
reason = ("There was a problem confirming the ssl certificate: "
"%s" % exc)
cls._handle_fail(link, reason, url, level=2, meth=logger.info)
else:
return inst
@staticmethod
def _handle_fail(link, reason, url, level=1, meth=None):
if meth is None:
meth = logger.debug
meth("Could not fetch URL %s: %s - skipping", link, reason)
@staticmethod
def _get_content_type(url, session):
"""Get the Content-Type of the given url, using a HEAD request"""
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url)
if scheme not in ('http', 'https'):
# FIXME: some warning or something?
# assertion error?
return ''
resp = session.head(url, allow_redirects=True)
resp.raise_for_status()
return resp.headers.get("Content-Type", "")
@cached_property
def api_version(self):
metas = [
x for x in self.parsed.findall(".//meta")
if x.get("name", "").lower() == "api-version"
]
if metas:
try:
return int(metas[0].get("value", None))
except (TypeError, ValueError):
pass
return None
@cached_property
def base_url(self):
bases = [
x for x in self.parsed.findall(".//base")
if x.get("href") is not None
]
if bases and bases[0].get("href"):
return bases[0].get("href")
else:
return self.url
@property
def links(self):
"""Yields all links in the page"""
for anchor in self.parsed.findall(".//a"):
if anchor.get("href"):
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
# Determine if this link is internal. If that distinction
# doesn't make sense in this context, then we don't make
# any distinction.
internal = None
if self.api_version and self.api_version >= 2:
# Only api_versions >= 2 have a distinction between
# external and internal links
internal = bool(
anchor.get("rel") and
"internal" in anchor.get("rel").split()
)
yield Link(url, self, internal=internal)
def rel_links(self, rels=('homepage', 'download')):
"""Yields all links with the given relations"""
rels = set(rels)
for anchor in self.parsed.findall(".//a"):
if anchor.get("rel") and anchor.get("href"):
found_rels = set(anchor.get("rel").split())
# Determine the intersection between what rels were found and
# what rels were being looked for
if found_rels & rels:
href = anchor.get("href")
url = self.clean_link(
urllib_parse.urljoin(self.base_url, href)
)
yield Link(url, self, trusted=False)
_clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
def clean_link(self, url):
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self._clean_re.sub(
lambda match: '%%%2x' % ord(match.group(0)), url)
class Link(object):
def __init__(self, url, comes_from=None, internal=None, trusted=None):
# url can be a UNC windows share
if url != Inf and url.startswith('\\\\'):
url = path_to_url(url)
self.url = url
self.comes_from = comes_from
self.internal = internal
self.trusted = trusted
def __str__(self):
if self.comes_from:
return '%s (from %s)' % (self.url, self.comes_from)
else:
return str(self.url)
def __repr__(self):
return '<Link %s>' % self
def __eq__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url == other.url
def __ne__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url != other.url
def __lt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url < other.url
def __le__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url <= other.url
def __gt__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url > other.url
def __ge__(self, other):
if not isinstance(other, Link):
return NotImplemented
return self.url >= other.url
def __hash__(self):
return hash(self.url)
@property
def filename(self):
_, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
name = posixpath.basename(path.rstrip('/')) or netloc
name = urllib_parse.unquote(name)
assert name, ('URL %r produced no filename' % self.url)
return name
@property
def scheme(self):
return urllib_parse.urlsplit(self.url)[0]
@property
def netloc(self):
return urllib_parse.urlsplit(self.url)[1]
@property
def path(self):
return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
def splitext(self):
return splitext(posixpath.basename(self.path.rstrip('/')))
@property
def ext(self):
return self.splitext()[1]
@property
def url_without_fragment(self):
scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
_egg_fragment_re = re.compile(r'#egg=([^&]*)')
@property
def egg_fragment(self):
match = self._egg_fragment_re.search(self.url)
if not match:
return None
return match.group(1)
_hash_re = re.compile(
r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
)
@property
def hash(self):
match = self._hash_re.search(self.url)
if match:
return match.group(2)
return None
@property
def hash_name(self):
match = self._hash_re.search(self.url)
if match:
return match.group(1)
return None
@property
def show_url(self):
return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
@property
def verifiable(self):
"""
Returns True if this link can be verified after download, False if it
cannot, and None if we cannot determine.
"""
trusted = self.trusted or getattr(self.comes_from, "trusted", None)
if trusted is not None and trusted:
# This link came from a trusted source. It *may* be verifiable but
# first we need to see if this page is operating under the new
# API version.
try:
api_version = getattr(self.comes_from, "api_version", None)
api_version = int(api_version)
except (ValueError, TypeError):
api_version = None
if api_version is None or api_version <= 1:
# This link is either trusted, or it came from a trusted,
# however it is not operating under the API version 2 so
# we can't make any claims about if it's safe or not
return
if self.hash:
# This link came from a trusted source and it has a hash, so we
# can consider it safe.
return True
else:
# This link came from a trusted source, using the new API
# version, and it does not have a hash. It is NOT verifiable
return False
elif trusted is not None:
# This link came from an untrusted source and we cannot trust it
return False
@property
def is_wheel(self):
return self.ext == wheel_ext
@property
def is_artifact(self):
"""
Determines if this points to an actual artifact (e.g. a tarball) or if
it points to an "abstract" thing like a path or a VCS location.
"""
from pip.vcs import vcs
if self.scheme in vcs.all_schemes:
return False
return True
# An object to represent the "link" for the installed version of a requirement.
# Using Inf as the url makes it sort higher.
INSTALLED_VERSION = Link(Inf)
FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
"""This object has two fields, no_binary and only_binary.
If a field is falsy, it isn't set. If it is {':all:'}, it should match all
packages except those listed in the other field. Only one field can be set
to {':all:'} at a time. The rest of the time exact package name matches
are listed, with any given package only showing up in one field at a time.
"""
def fmt_ctl_handle_mutual_exclude(value, target, other):
new = value.split(',')
while ':all:' in new:
other.clear()
target.clear()
target.add(':all:')
del new[:new.index(':all:') + 1]
if ':none:' not in new:
# Without a none, we want to discard everything as :all: covers it
return
for name in new:
if name == ':none:':
target.clear()
continue
name = pkg_resources.safe_name(name).lower()
other.discard(name)
target.add(name)
def fmt_ctl_formats(fmt_ctl, canonical_name):
result = set(["binary", "source"])
if canonical_name in fmt_ctl.only_binary:
result.discard('source')
elif canonical_name in fmt_ctl.no_binary:
result.discard('binary')
elif ':all:' in fmt_ctl.only_binary:
result.discard('source')
elif ':all:' in fmt_ctl.no_binary:
result.discard('binary')
return frozenset(result)
def fmt_ctl_no_binary(fmt_ctl):
fmt_ctl_handle_mutual_exclude(
':all:', fmt_ctl.no_binary, fmt_ctl.only_binary)
def fmt_ctl_no_use_wheel(fmt_ctl):
fmt_ctl_no_binary(fmt_ctl)
warnings.warn(
'--no-use-wheel is deprecated and will be removed in the future. '
' Please use --no-binary :all: instead.', DeprecationWarning,
stacklevel=2)
Search = namedtuple('Search', 'supplied canonical formats')
"""Capture key aspects of a search.
:attribute supplied: The user supplied package.
:attribute canonical: The canonical package name.
:attribute formats: The formats allowed for this package. Should be a set
with 'binary' or 'source' or both in it.
"""
| gpl-3.0 |
dr4ke616/donedeal-notifier | notifier/mail/client.py | 2 | 1978 | import smtplib
from os.path import basename
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
class Email(object):
mail_server = 'localhost'
port = 0
mail_user = None
mail_passwd = None
content_type = 'html'
def __init__(self, me, you, subject, body, **kwargs):
self.me = me
self.you = you if isinstance(you, list) else [you]
self.subject = subject
self.body = body.encode('utf-8')
for key, value in kwargs.items():
setattr(self, key, value)
def __str__(self):
return self._message.as_string()
def _build_mime_multipart(self, location):
mime = MIMEMultipart('alternative')
mime['Subject'] = self.subject
mime['From'] = self.me
mime['To'] = location
mime.attach(MIMEText(self.body, self.content_type))
self._message = mime
def _authenticate(self, smtpserver):
if self.mail_user is not None or self.mail_passwd is not None:
smtpserver.ehlo()
smtpserver.starttls()
# smtpserver.ehlo
smtpserver.login(self.mail_user, self.mail_passwd)
def attach(self, attachment):
with open(attachment, "rb") as f:
self._message.attach(MIMEApplication(
f.read(),
                Content_Disposition='attachment; filename="%s"' % basename(attachment),
                Name=basename(attachment)
))
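    # Hedged usage note: attach() requires the MIME container, which is only
    # created inside _build_mime_multipart(); and since send() rebuilds that
    # container for every recipient, attachments added beforehand are
    # discarded -- a caveat of the current design, e.g.
    #
    #     mail = Email('me@example.com', 'you@example.com', 'Hi', '<b>Hi</b>')
    #     mail._build_mime_multipart('you@example.com')
    #     mail.attach('/tmp/report.pdf')   # illustrative path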
def send(self):
for location in self.you:
self._build_mime_multipart(location)
smtpserver = smtplib.SMTP(self.mail_server, self.port)
self._authenticate(smtpserver)
smtpserver.sendmail(self.me, location, str(self))
smtpserver.close()
if __name__ == '__main__':
Email(
'adamdrakeford@gmail.com',
'adamdrakeford@gmail.com',
'Test',
'This is a test'
).send()
| mit |
markovmodel/PyEMMA | pyemma/coordinates/tests/test_traj_info_cache.py | 2 | 14155 | # This file is part of PyEMMA.
#
# Copyright (c) 2015, 2014 Computational Molecular Biology Group, Freie Universitaet Berlin (GER)
#
# PyEMMA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Created on 30.04.2015
@author: marscher
'''
from tempfile import NamedTemporaryFile
import os
import tempfile
import unittest
from unittest import mock
from pyemma.coordinates import api
from pyemma.coordinates.data.feature_reader import FeatureReader
from pyemma.coordinates.data.numpy_filereader import NumPyFileReader
from pyemma.coordinates.data.py_csv_reader import PyCSVReader
from pyemma.coordinates.data.util.traj_info_backends import SqliteDB
from pyemma.coordinates.data.util.traj_info_cache import TrajectoryInfoCache
from pyemma.coordinates.tests.util import create_traj
from pyemma.datasets import get_bpti_test_data
from pyemma.util import config
from pyemma.util.contexts import settings
from pyemma.util.files import TemporaryDirectory
import mdtraj
import pkg_resources
import pyemma
import numpy as np
xtcfiles = get_bpti_test_data()['trajs']
pdbfile = get_bpti_test_data()['top']
class TestTrajectoryInfoCache(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.old_instance = TrajectoryInfoCache.instance()
config.use_trajectory_lengths_cache = True
def setUp(self):
self.work_dir = tempfile.mkdtemp(prefix="traj_cache_test")
self.tmpfile = tempfile.mktemp(dir=self.work_dir)
self.db = TrajectoryInfoCache(self.tmpfile)
# overwrite TrajectoryInfoCache._instance with self.db...
TrajectoryInfoCache._instance = self.db
config.use_trajectory_lengths_cache = True
def tearDown(self):
self.db.close()
os.unlink(self.tmpfile)
import shutil
shutil.rmtree(self.work_dir, ignore_errors=True)
@classmethod
def tearDownClass(cls):
TrajectoryInfoCache._instance = cls.old_instance
config.use_trajectory_lengths_cache = False
def test_get_instance(self):
# test for exceptions in singleton creation
inst = TrajectoryInfoCache.instance()
inst.current_db_version
self.assertIs(inst, self.db)
def test_store_load_traj_info(self):
x = np.random.random((10, 3))
from pyemma.util._config import Config
my_conf = Config()
my_conf.cfg_dir = self.work_dir
with mock.patch('pyemma.coordinates.data.util.traj_info_cache.config', my_conf):
with NamedTemporaryFile(delete=False) as fh:
np.savetxt(fh.name, x)
reader = api.source(fh.name)
info = self.db[fh.name, reader]
self.db.close()
self.db.__init__(self.db._database.filename)
info2 = self.db[fh.name, reader]
self.assertEqual(info2, info)
def test_exceptions(self):
        # inaccessible files
        not_existent = ''.join(
            chr(i) for i in np.random.randint(65, 90, size=10)) + '.npy'
        bad = [not_existent]  # should be inaccessible or non-existent
with self.assertRaises(ValueError) as cm:
api.source(bad)
        assert bad[0] in str(cm.exception)
# empty files
with NamedTemporaryFile(delete=False) as f:
f.close()
with self.assertRaises(ValueError) as cm:
api.source(f.name)
        assert f.name in str(cm.exception)
# bogus files
with NamedTemporaryFile(suffix='.npy', delete=False) as f:
x = np.array([1, 2, 3])
np.save(f, x)
with open(f.name, 'wb') as f2:
f2.write(b'asdf')
with self.assertRaises(IOError) as cm:
api.source(f.name)
def test_featurereader_xtc(self):
# cause cache failures
with settings(use_trajectory_lengths_cache=False):
reader = FeatureReader(xtcfiles, pdbfile)
results = {}
for f in xtcfiles:
traj_info = self.db[f, reader]
results[f] = traj_info.ndim, traj_info.length, traj_info.offsets
expected = {}
for f in xtcfiles:
with mdtraj.open(f) as fh:
length = len(fh)
ndim = fh.read(1)[0].shape[1]
offsets = fh.offsets if hasattr(fh, 'offsets') else []
expected[f] = ndim, length, offsets
np.testing.assert_equal(results, expected)
def test_npy_reader(self):
lengths_and_dims = [(7, 3), (23, 3), (27, 3)]
data = [
np.empty((n, dim)) for n, dim in lengths_and_dims]
files = []
with TemporaryDirectory() as td:
for i, x in enumerate(data):
fn = os.path.join(td, "%i.npy" % i)
np.save(fn, x)
files.append(fn)
reader = NumPyFileReader(files)
# cache it and compare
results = {f: (self.db[f, reader].length, self.db[f, reader].ndim,
self.db[f, reader].offsets) for f in files}
expected = {f: (len(data[i]), data[i].shape[1], [])
for i, f in enumerate(files)}
np.testing.assert_equal(results, expected)
def test_csvreader(self):
data = np.random.random((101, 3))
fn = tempfile.mktemp()
try:
np.savetxt(fn, data)
# calc offsets
offsets = [0]
with open(fn, PyCSVReader.DEFAULT_OPEN_MODE) as new_fh:
while new_fh.readline():
offsets.append(new_fh.tell())
reader = PyCSVReader(fn)
assert reader.dimension() == 3
trajinfo = reader._get_traj_info(fn)
np.testing.assert_equal(offsets, trajinfo.offsets)
finally:
os.unlink(fn)
def test_fragmented_reader(self):
top_file = pkg_resources.resource_filename(__name__, 'data/test.pdb')
trajfiles = []
nframes = []
with TemporaryDirectory() as wd:
for _ in range(3):
f, _, l = create_traj(top_file, dir=wd)
trajfiles.append(f)
nframes.append(l)
# three trajectories: one consisting of all three, one consisting of the first,
# one consisting of the first and the last
reader = api.source(
[trajfiles, [trajfiles[0]], [trajfiles[0], trajfiles[2]]], top=top_file)
np.testing.assert_equal(reader.trajectory_lengths(),
[sum(nframes), nframes[0], nframes[0] + nframes[2]])
def test_feature_reader_xyz(self):
traj = mdtraj.load(xtcfiles, top=pdbfile)
length = len(traj)
with NamedTemporaryFile(mode='wb', suffix='.xyz', delete=False) as f:
fn = f.name
traj.save_xyz(fn)
f.close()
reader = pyemma.coordinates.source(fn, top=pdbfile)
self.assertEqual(reader.trajectory_length(0), length)
def test_data_in_mem(self):
# make sure cache is not used for data in memory!
data = [np.empty((3, 3))] * 3
api.source(data)
self.assertEqual(self.db.num_entries, 0)
def test_old_db_conversion(self):
# prior 2.1, database only contained lengths (int as string) entries
# check conversion is happening
with NamedTemporaryFile(suffix='.npy', delete=False) as f:
db = TrajectoryInfoCache(None)
fn = f.name
np.save(fn, [1, 2, 3])
            f.close()  # close explicitly so the file can be re-opened on Windows
reader = api.source(fn)
hash = db._get_file_hash(fn)
from pyemma.coordinates.data.util.traj_info_backends import DictDB
db._database = DictDB()
db._database.db_version = 0
info = db[fn, reader]
assert info.length == 3
assert info.ndim == 1
assert info.offsets == []
def test_corrupted_db(self):
with NamedTemporaryFile(mode='w', suffix='.dat', delete=False) as f:
f.write("makes no sense!!!!")
f.close()
name = f.name
import warnings
with warnings.catch_warnings(record=True) as cm:
warnings.simplefilter('always')
db = TrajectoryInfoCache(name)
assert len(cm) == 1
assert "corrupted" in str(cm[-1].message)
# ensure we can perform lookups on the broken db without exception.
r = api.source(xtcfiles[0], top=pdbfile)
db[xtcfiles[0], r]
def test_n_entries(self):
assert config.use_trajectory_lengths_cache
self.assertEqual(self.db.num_entries, 0)
assert TrajectoryInfoCache._instance is self.db
pyemma.coordinates.source(xtcfiles, top=pdbfile)
self.assertEqual(self.db.num_entries, len(xtcfiles))
def test_max_n_entries(self):
data = [np.random.random((10, 3)) for _ in range(20)]
max_entries = 10
config.traj_info_max_entries = max_entries
files = []
with TemporaryDirectory() as td:
for i, arr in enumerate(data):
f = os.path.join(td, "%s.npy" % i)
np.save(f, arr)
files.append(f)
pyemma.coordinates.source(files)
self.assertLessEqual(self.db.num_entries, max_entries)
self.assertGreater(self.db.num_entries, 0)
def test_max_size(self):
data = [np.random.random((150, 10)) for _ in range(150)]
max_size = 1
files = []
with TemporaryDirectory() as td, settings(traj_info_max_size=max_size, show_progress_bars=False):
for i, arr in enumerate(data):
f = os.path.join(td, "%s.txt" % i)
# save as txt to enforce creation of offsets
np.savetxt(f, arr)
files.append(f)
pyemma.coordinates.source(files)
self.assertLessEqual(os.stat(self.db.database_filename).st_size / 1024, config.traj_info_max_size)
self.assertGreater(self.db.num_entries, 0)
def test_no_working_directory(self):
# this is the case as long as the user has not yet created a config directory via config.save()
self.db._database = SqliteDB(filename=None)
# trigger caching
pyemma.coordinates.source(xtcfiles, top=pdbfile)
def test_no_sqlite(self):
        # create a new instance (__init__ has to be called); install a temporary
        # import hook that raises ImportError for sqlite3
import sys
del sys.modules['sqlite3']
class meta_ldr(object):
def find_module(self, fullname, path):
if fullname.startswith('sqlite3'):
return self
def load_module(self, fullname, path=None):
raise ImportError()
import warnings
try:
sys.meta_path.insert(0, meta_ldr())
# import sqlite3
with warnings.catch_warnings(record=True) as cw:
db = TrajectoryInfoCache()
self.assertNotIsInstance(db._database, SqliteDB)
self.assertEqual(len(cw), 1)
self.assertIn("sqlite3 package not available", cw[0].message.args[0])
finally:
del sys.meta_path[0]
def test_in_memory_db(self):
""" new instance, not yet saved to disk, no lru cache avail """
old_cfg_dir = config.cfg_dir
try:
config._cfg_dir = ''
db = TrajectoryInfoCache()
reader = pyemma.coordinates.source(xtcfiles, top=pdbfile)
info = db[xtcfiles[0], reader]
self.assertIsInstance(db._database, SqliteDB)
directory = db._database._database_from_key(info.hash_value)
assert directory is None
finally:
from pyemma.util.exceptions import ConfigDirectoryException
try:
config.cfg_dir = old_cfg_dir
except ConfigDirectoryException:
pass
def test_stress(self):
arrays = [np.empty((5, 2))] * 100
npy_files = [os.path.join(self.work_dir, '{}.npy'.format(i)) for i in range(len(arrays))]
[np.save(f, x) for f, x in zip(npy_files, arrays)]
env = os.environ.copy()
env['PYEMMA_CFG_DIR'] = self.work_dir
import subprocess
import sys
import time
        script = 'import pyemma; pyemma.coordinates.source({files})' \
            .format(files=npy_files)
failed = False
procs = [subprocess.Popen([sys.executable, '-c', script], env=env) for _ in range(10)]
error = None
        while procs:
            for proc in procs:
                retcode = proc.poll()
                if retcode is not None:
                    break
            else:  # No process is done, wait a bit and check again.
                time.sleep(.1)
                continue
            # Here, `proc` has finished with return code `retcode`; removing
            # it inside the for loop above would skip entries, so do it here.
            procs.remove(proc)
            if retcode != 0:
                print('process failed with {}'.format(retcode))
                failed = True
                break
        self.assertTrue(not failed, msg=error)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
trehn/teamvault | teamvault/apps/secrets/admin.py | 2 | 2477 | from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import AccessRequest, Secret, SecretRevision
class AccessRequestAdmin(admin.ModelAdmin):
fieldsets = (
(_("Subject"), {
'fields': (
'requester',
'secret',
'created',
'reason_request',
),
}),
(_("Status"), {
'fields': (
'reviewers',
'status',
'closed',
'closed_by',
'reason_rejected',
),
}),
)
date_hierarchy = 'created'
list_display = ('requester', 'secret', 'status', 'created')
list_filter = ('status',)
readonly_fields = ('created',)
    search_fields = ('requester__username', 'secret__name',)
admin.site.register(AccessRequest, AccessRequestAdmin)
class SecretAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': (
'name',
'content_type',
'description',
),
}),
(_("Audit"), {
'fields': (
'created',
'last_read',
),
}),
(_("Security"), {
'fields': (
'status',
'access_policy',
'needs_changing_on_leave',
'allowed_groups',
'allowed_users',
),
}),
)
date_hierarchy = 'created'
list_display = ('name', 'last_read')
list_filter = ('access_policy', 'needs_changing_on_leave', 'status')
radio_fields = {
'access_policy': admin.HORIZONTAL,
'status': admin.HORIZONTAL,
}
readonly_fields = ('created', 'last_read')
search_fields = ('name', 'description')
admin.site.register(Secret, SecretAdmin)
class SecretRevisionAdmin(admin.ModelAdmin):
fieldsets = (
(None, {
'fields': (
'secret',
'encrypted_data',
),
}),
(_("Audit"), {
'fields': (
'created',
'set_by',
'accessed_by',
),
}),
)
date_hierarchy = 'created'
list_display = ('secret', 'id', 'created')
readonly_fields = ('accessed_by', 'created', 'set_by')
search_fields = ('secret__name',)
admin.site.register(SecretRevision, SecretRevisionAdmin)
| gpl-3.0 |
rs2/pandas | pandas/tests/io/parser/test_na_values.py | 2 | 15082 | """
Tests that NA values are properly handled during
parsing for all of the parsers defined in parsers.py
"""
from io import StringIO
import numpy as np
import pytest
from pandas._libs.parsers import STR_NA_VALUES
from pandas import DataFrame, Index, MultiIndex
import pandas._testing as tm
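# Behaviour sketch (illustrative, not itself a test): with default settings
# the parsers map sentinel strings such as "NA" and "NaN" to NaN, so for the
# `all_parsers` fixture used throughout:
#
#     parser.read_csv(StringIO("a\nNA\n1"))   # column 'a' -> [NaN, 1.0]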
def test_string_nas(all_parsers):
parser = all_parsers
data = """A,B,C
a,b,c
d,,f
,g,h
"""
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[["a", "b", "c"], ["d", np.nan, "f"], [np.nan, "g", "h"]],
columns=["A", "B", "C"],
)
tm.assert_frame_equal(result, expected)
def test_detect_string_na(all_parsers):
parser = all_parsers
data = """A,B
foo,bar
NA,baz
NaN,nan
"""
expected = DataFrame(
[["foo", "bar"], [np.nan, "baz"], [np.nan, np.nan]], columns=["A", "B"]
)
result = parser.read_csv(StringIO(data))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_values",
[
["-999.0", "-999"],
[-999, -999.0],
[-999.0, -999],
["-999.0"],
["-999"],
[-999.0],
[-999],
],
)
@pytest.mark.parametrize(
"data",
[
"""A,B
-999,1.2
2,-999
3,4.5
""",
"""A,B
-999,1.200
2,-999.000
3,4.500
""",
],
)
def test_non_string_na_values(all_parsers, data, na_values):
# see gh-3611: with an odd float format, we can't match
# the string "999.0" exactly but still need float matching
parser = all_parsers
expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"])
result = parser.read_csv(StringIO(data), na_values=na_values)
tm.assert_frame_equal(result, expected)
def test_default_na_values(all_parsers):
_NA_VALUES = {
"-1.#IND",
"1.#QNAN",
"1.#IND",
"-1.#QNAN",
"#N/A",
"N/A",
"n/a",
"NA",
"<NA>",
"#NA",
"NULL",
"null",
"NaN",
"nan",
"-NaN",
"-nan",
"#N/A N/A",
"",
}
assert _NA_VALUES == STR_NA_VALUES
parser = all_parsers
nv = len(_NA_VALUES)
    def f(i, v):
        # Build row i of a CSV in which value v sits in column i and all
        # other columns are empty (and therefore parsed as NaN).
if i == 0:
buf = ""
elif i > 0:
buf = "".join([","] * i)
buf = f"{buf}{v}"
if i < nv - 1:
joined = "".join([","] * (nv - i - 1))
buf = f"{buf}{joined}"
return buf
data = StringIO("\n".join(f(i, v) for i, v in enumerate(_NA_VALUES)))
expected = DataFrame(np.nan, columns=range(nv), index=range(nv))
result = parser.read_csv(data, header=None)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("na_values", ["baz", ["baz"]])
def test_custom_na_values(all_parsers, na_values):
parser = all_parsers
data = """A,B,C
ignore,this,row
1,NA,3
-1.#IND,5,baz
7,8,NaN
"""
expected = DataFrame(
[[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"]
)
result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])
tm.assert_frame_equal(result, expected)
def test_bool_na_values(all_parsers):
data = """A,B,C
True,False,True
NA,True,False
False,NA,True"""
parser = all_parsers
result = parser.read_csv(StringIO(data))
expected = DataFrame(
{
"A": np.array([True, np.nan, False], dtype=object),
"B": np.array([False, True, np.nan], dtype=object),
"C": [True, False, True],
}
)
tm.assert_frame_equal(result, expected)
def test_na_value_dict(all_parsers):
data = """A,B,C
foo,bar,NA
bar,foo,foo
foo,bar,NA
bar,foo,foo"""
parser = all_parsers
df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})
expected = DataFrame(
{
"A": [np.nan, "bar", np.nan, "bar"],
"B": [np.nan, "foo", np.nan, "foo"],
"C": [np.nan, "foo", np.nan, "foo"],
}
)
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize(
"index_col,expected",
[
(
[0],
DataFrame({"b": [np.nan], "c": [1], "d": [5]}, index=Index([0], name="a")),
),
(
[0, 2],
DataFrame(
{"b": [np.nan], "d": [5]},
index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
),
),
(
["a", "c"],
DataFrame(
{"b": [np.nan], "d": [5]},
index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),
),
),
],
)
def test_na_value_dict_multi_index(all_parsers, index_col, expected):
data = """\
a,b,c,d
0,NA,1,5
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), na_values=set(), index_col=index_col)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"kwargs,expected",
[
(
dict(),
DataFrame(
{
"A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
}
),
),
(
dict(na_values={"A": [], "C": []}, keep_default_na=False),
DataFrame(
{
"A": ["a", "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", "nan", "five", "", "seven"],
}
),
),
(
dict(na_values=["a"], keep_default_na=False),
DataFrame(
{
"A": [np.nan, "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", "nan", "five", "", "seven"],
}
),
),
(
dict(na_values={"A": [], "C": []}),
DataFrame(
{
"A": ["a", "b", np.nan, "d", "e", np.nan, "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],
}
),
),
],
)
def test_na_values_keep_default(all_parsers, kwargs, expected):
data = """\
A,B,C
a,1,one
b,2,two
,3,three
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), **kwargs)
tm.assert_frame_equal(result, expected)
def test_no_na_values_no_keep_default(all_parsers):
# see gh-4318: passing na_values=None and
    # keep_default_na=False yields "None" as an na_value
data = """\
A,B,C
a,1,None
b,2,two
,3,None
d,4,nan
e,5,five
nan,6,
g,7,seven
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), keep_default_na=False)
expected = DataFrame(
{
"A": ["a", "b", "", "d", "e", "nan", "g"],
"B": [1, 2, 3, 4, 5, 6, 7],
"C": ["None", "two", "None", "nan", "five", "", "seven"],
}
)
tm.assert_frame_equal(result, expected)
def test_no_keep_default_na_dict_na_values(all_parsers):
# see gh-19227
data = "a,b\n,2"
parser = all_parsers
result = parser.read_csv(
StringIO(data), na_values={"b": ["2"]}, keep_default_na=False
)
expected = DataFrame({"a": [""], "b": [np.nan]})
tm.assert_frame_equal(result, expected)
def test_no_keep_default_na_dict_na_scalar_values(all_parsers):
# see gh-19227
#
# Scalar values shouldn't cause the parsing to crash or fail.
data = "a,b\n1,2"
parser = all_parsers
df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)
expected = DataFrame({"a": [1], "b": [np.nan]})
tm.assert_frame_equal(df, expected)
@pytest.mark.parametrize("col_zero_na_values", [113125, "113125"])
def test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values):
# see gh-19227
data = """\
113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008
729639,"qwer","",asdfkj,466.681,,252.373
"""
parser = all_parsers
expected = DataFrame(
{
0: [np.nan, 729639.0],
1: [np.nan, "qwer"],
2: ["/blaha", np.nan],
3: ["kjsdkj", "asdfkj"],
4: [412.166, 466.681],
5: ["225.874", ""],
6: [np.nan, 252.373],
}
)
result = parser.read_csv(
StringIO(data),
header=None,
keep_default_na=False,
na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_filter,row_data",
[
(True, [[1, "A"], [np.nan, np.nan], [3, "C"]]),
(False, [["1", "A"], ["nan", "B"], ["3", "C"]]),
],
)
def test_na_values_na_filter_override(all_parsers, na_filter, row_data):
data = """\
A,B
1,A
nan,B
3,C
"""
parser = all_parsers
result = parser.read_csv(StringIO(data), na_values=["B"], na_filter=na_filter)
expected = DataFrame(row_data, columns=["A", "B"])
tm.assert_frame_equal(result, expected)
def test_na_trailing_columns(all_parsers):
parser = all_parsers
data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax
2012-03-14,USD,AAPL,BUY,1000
2012-05-12,USD,SBUX,SELL,500"""
# Trailing columns should be all NaN.
result = parser.read_csv(StringIO(data))
expected = DataFrame(
[
["2012-03-14", "USD", "AAPL", "BUY", 1000, np.nan, np.nan, np.nan],
["2012-05-12", "USD", "SBUX", "SELL", 500, np.nan, np.nan, np.nan],
],
columns=[
"Date",
"Currency",
"Symbol",
"Type",
"Units",
"UnitPrice",
"Cost",
"Tax",
],
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_values,row_data",
[
(1, [[np.nan, 2.0], [2.0, np.nan]]),
({"a": 2, "b": 1}, [[1.0, 2.0], [np.nan, np.nan]]),
],
)
def test_na_values_scalar(all_parsers, na_values, row_data):
# see gh-12224
parser = all_parsers
names = ["a", "b"]
data = "1,2\n2,1"
result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
expected = DataFrame(row_data, columns=names)
tm.assert_frame_equal(result, expected)
def test_na_values_dict_aliasing(all_parsers):
parser = all_parsers
na_values = {"a": 2, "b": 1}
na_values_copy = na_values.copy()
names = ["a", "b"]
data = "1,2\n2,1"
expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)
result = parser.read_csv(StringIO(data), names=names, na_values=na_values)
tm.assert_frame_equal(result, expected)
tm.assert_dict_equal(na_values, na_values_copy)
def test_na_values_dict_col_index(all_parsers):
# see gh-14203
data = "a\nfoo\n1"
parser = all_parsers
na_values = {0: "foo"}
result = parser.read_csv(StringIO(data), na_values=na_values)
expected = DataFrame({"a": [np.nan, 1]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data,kwargs,expected",
[
(
str(2 ** 63) + "\n" + str(2 ** 63 + 1),
dict(na_values=[2 ** 63]),
DataFrame([str(2 ** 63), str(2 ** 63 + 1)]),
),
(str(2 ** 63) + ",1" + "\n,2", dict(), DataFrame([[str(2 ** 63), 1], ["", 2]])),
(str(2 ** 63) + "\n1", dict(na_values=[2 ** 63]), DataFrame([np.nan, 1])),
],
)
def test_na_values_uint64(all_parsers, data, kwargs, expected):
# see gh-14983
parser = all_parsers
result = parser.read_csv(StringIO(data), header=None, **kwargs)
tm.assert_frame_equal(result, expected)
def test_empty_na_values_no_default_with_index(all_parsers):
# see gh-15835
data = "a,1\nb,2"
parser = all_parsers
expected = DataFrame({"1": [2]}, index=Index(["b"], name="a"))
result = parser.read_csv(StringIO(data), index_col=0, keep_default_na=False)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])]
)
def test_no_na_filter_on_index(all_parsers, na_filter, index_data):
# see gh-5239
#
# Don't parse NA-values in index unless na_filter=True
parser = all_parsers
data = "a,b,c\n1,,3\n4,5,6"
expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b"))
result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter)
tm.assert_frame_equal(result, expected)
def test_inf_na_values_with_int_index(all_parsers):
# see gh-17128
parser = all_parsers
data = "idx,col1,col2\n1,3,4\n2,inf,-inf"
# Don't fail with OverflowError with inf's and integer index column.
out = parser.read_csv(StringIO(data), index_col=[0], na_values=["inf", "-inf"])
expected = DataFrame(
{"col1": [3, np.nan], "col2": [4, np.nan]}, index=Index([1, 2], name="idx")
)
tm.assert_frame_equal(out, expected)
@pytest.mark.parametrize("na_filter", [True, False])
def test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter):
# see gh-20377
parser = all_parsers
data = "a,b,c\n1,,3\n4,5,6"
# na_filter=True --> missing value becomes NaN.
# na_filter=False --> missing value remains empty string.
empty = np.nan if na_filter else ""
expected = DataFrame({"a": ["1", "4"], "b": [empty, "5"], "c": ["3", "6"]})
result = parser.read_csv(StringIO(data), na_filter=na_filter, dtype=str)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"data, na_values",
[
("false,1\n,1\ntrue", None),
("false,1\nnull,1\ntrue", None),
("false,1\nnan,1\ntrue", None),
("false,1\nfoo,1\ntrue", "foo"),
("false,1\nfoo,1\ntrue", ["foo"]),
("false,1\nfoo,1\ntrue", {"a": "foo"}),
],
)
def test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):
parser = all_parsers
msg = (
"(Bool column has NA values in column [0a])|"
"(cannot safely convert passed user dtype of "
"bool for object dtyped data in column 0)"
)
with pytest.raises(ValueError, match=msg):
parser.read_csv(
StringIO(data),
header=None,
names=["a", "b"],
dtype={"a": "bool"},
na_values=na_values,
)
def test_str_nan_dropped(all_parsers):
# see gh-21131
parser = all_parsers
data = """File: small.csv,,
10010010233,0123,654
foo,,bar
01001000155,4530,898"""
result = parser.read_csv(
StringIO(data),
header=None,
names=["col1", "col2", "col3"],
dtype={"col1": str, "col2": str, "col3": str},
).dropna()
expected = DataFrame(
{
"col1": ["10010010233", "01001000155"],
"col2": ["0123", "4530"],
"col3": ["654", "898"],
},
index=[1, 3],
)
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
Livefyre/flaubert | flaubert/preprocess.py | 1 | 19265 | import nltk
import unicodedata
import regex as re
import sys
import abc
import logging
import os
import cPickle as pickle
from pkg_resources import resource_filename
from bs4 import BeautifulSoup
from itertools import islice
from functools import partial
from nltk.corpus import stopwords
from nltk.stem import wordnet, PorterStemmer
from nltk import pos_tag
from joblib import Parallel, delayed
from pymaptools.io import write_json_line, PathArgumentParser, GzipFileType, open_gz
from flaubert.tokenize import RegexpFeatureTokenizer
from flaubert.urls import URLParser
from flaubert.conf import CONFIG
from flaubert.HTMLParser import HTMLParser, HTMLParseError
from flaubert.utils import treebank2wordnet, lru_wrap, pd_dict_iter
from flaubert.unicode_maps import EXTRA_TRANSLATE_MAP
from flaubert.punkt import PunktTrainer, PunktLanguageVars, PunktSentenceTokenizer
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger(__name__)
class Replacer(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def replace(self, text):
"""Calls regex's sub method with self as callable"""
@abc.abstractmethod
def replacen(self, text):
"""Calls regex's subn method with self as callable"""
class RepeatReplacer(Replacer):
"""Remove repeating characters from text
The default pattern only applies to non-decimal characters
>>> rep = RepeatReplacer(max_repeats=3)
>>> rep.replace(u"So many $100000 bills.......")
u'So many $100000 bills...'
"""
def __init__(self, pattern=u'[^\\d\\*]', max_repeats=3):
if max_repeats < 1:
raise ValueError("Invalid parameter value max_repeats={}"
.format(max_repeats))
prev = u'\\1' * max_repeats
pattern = u'(%s)' % pattern
regexp = re.compile(pattern + prev + u'+', re.UNICODE)
self.replace = partial(regexp.sub, prev)
self.replacen = partial(regexp.subn, prev)
def replace(self, text):
"""Remove repeating characters from text
Method definition needed only for abstract base class
(it is overwritten during init)
"""
pass
def replacen(self, text):
"""Remove repeating characters from text
while also returning number of substitutions made
Method definition needed only for abstract base class
(it is overwritten during init)
"""
pass
class GenericReplacer(Replacer):
__metaclass__ = abc.ABCMeta
def __init__(self, regexp):
self._re = regexp
@abc.abstractmethod
def __call__(self, match):
"""Override this to provide your own substitution method"""
def replace(self, text):
return self._re.sub(self, text)
def replacen(self, text):
return self._re.subn(self, text)
class InPlaceReplacer(GenericReplacer):
def __init__(self, replace_map=None):
if replace_map is None:
replace_map = dict()
_replacements = dict()
_regexes = list()
for idx, (key, val) in enumerate(replace_map.iteritems()):
_replacements[idx] = val
_regexes.append(u'({})'.format(key))
self._replacements = _replacements
super(InPlaceReplacer, self).__init__(re.compile(u'|'.join(_regexes), re.UNICODE | re.IGNORECASE))
def __call__(self, match):
lastindex = match.lastindex
if lastindex is None:
return u''
replacement = self._replacements[lastindex - 1]
matched_string = match.group(lastindex)
return replacement.get(matched_string.lower(), matched_string) \
if isinstance(replacement, dict) \
else replacement
class Translator(Replacer):
"""Replace certain characters
"""
def __init__(self, translate_mapping=None, translated=False):
if translated:
self._translate_map = dict((translate_mapping or {}).iteritems())
else:
self._translate_map = {ord(k): ord(v) for k, v in (translate_mapping or {}).iteritems()}
def add_inverse_map(self, inverse_mapping, translated=False):
replace_map = {}
for key, vals in (inverse_mapping or {}).iteritems():
for val in vals:
replace_map[val] = key
self.add_map(replace_map, translated=translated)
def add_map(self, mapping, translated=False):
replace_map = self._translate_map
if translated:
replace_map.update(mapping)
else:
for key, val in mapping.iteritems():
replace_map[ord(key)] = ord(val)
def replace(self, text):
"""Replace characters
"""
return text.translate(self._translate_map)
def replacen(self, text):
"""Replace characters
while also returning number of substitutions made
Method definition needed only for abstract base class
"""
pass
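# Usage sketch (illustrative; the maps actually used come from
# flaubert.unicode_maps): a small translation map collapses typographic
# characters to ASCII.
#
#     tr = Translator({u'\u2019': u"'"})   # curly apostrophe -> straight
#     tr.replace(u'it\u2019s')             # -> u"it's"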
class MLStripper(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
self.handled_starttags = []
self.handled_startendtags = []
self._new_lines = 0
def append_new_lines(self):
for _ in xrange(self._new_lines):
self.fed.append("\n")
self._new_lines = 0
def handle_data(self, data):
self.append_new_lines()
self.fed.append(data)
def handle_starttag(self, tag, attrs):
HTMLParser.handle_starttag(self, tag, attrs)
self.handled_starttags.append(tag)
if tag == u"br":
self._new_lines += 1
elif tag == u"p":
self._new_lines += 1
def handle_endtag(self, tag):
HTMLParser.handle_endtag(self, tag)
if tag == u"p":
self._new_lines += 1
def handle_startendtag(self, tag, attrs):
HTMLParser.handle_starttag(self, tag, attrs)
self.handled_startendtags.append(tag)
if tag == u"br":
self._new_lines += 1
def handle_entityref(self, name):
        # Preserve entity references verbatim (real entities were already unescaped)
self.fed.append(u'&' + name)
def get_data(self):
self.append_new_lines()
return u''.join(self.fed)
class HTMLCleaner(object):
_remove_full_comment = partial(
(re.compile(ur"(?s)<!--(.*?)-->[\n]?", re.UNICODE)).sub, ur'\1')
_remove_partial_comment = partial(
(re.compile(ur"<!--", re.UNICODE)).sub, u"")
def __init__(self, strip_html=True, strip_html_comments=True):
self._strip_html = strip_html
self._strip_html_comments = strip_html_comments
def clean(self, html):
"""Remove HTML markup from the given string
"""
if self._strip_html_comments:
html = self._remove_full_comment(html)
html = self._remove_partial_comment(html)
if html and self._strip_html:
stripper = MLStripper()
try:
stripper.feed(html)
except HTMLParseError as err:
logging.exception(err)
else:
html = stripper.get_data().strip()
return html
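# Behaviour sketch (illustrative): markup is stripped while <br> and <p>
# boundaries are rendered as newlines, mirroring strip_html_bs below.
#
#     HTMLCleaner().clean(u'<p>Hi<br>there</p>')   # -> u'Hi\nthere'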
def strip_html_bs(text):
"""
Use BeautifulSoup to strip off HTML but in such a way that <BR> and
<P> tags get rendered as new lines
"""
soup = BeautifulSoup(text)
fragments = []
for element in soup.recursiveChildGenerator():
if isinstance(element, basestring):
fragments.append(element.strip())
elif element.name == 'br':
fragments.append(u"\n")
elif element.name == 'p':
fragments.append(u"\n")
result = u"".join(fragments).strip()
return result
class SimpleSentenceTokenizer(object):
def __init__(self, lemmatizer=None, stemmer=None, url_parser=None,
unicode_form='NFKC', nltk_stop_words="english",
sentence_tokenizer=('nltk_data', 'tokenizers/punkt/english.pickle'),
max_char_repeats=3, lru_cache_size=50000, translate_map_inv=None,
replace_map=None, html_renderer='default', add_abbrev_types=None,
del_sent_starters=None):
self._unicode_normalize = partial(unicodedata.normalize, unicode_form)
self._replace_inplace = InPlaceReplacer(replace_map).replace \
if replace_map else lambda x: x
self._tokenize = RegexpFeatureTokenizer().tokenize
self._stopwords = frozenset(stopwords.words(nltk_stop_words))
self._url_parser = url_parser
self._sentence_tokenizer, self._sentence_tokenize = \
self.load_sent_tokenizer(sentence_tokenizer, add_abbrev_types, del_sent_starters)
self.sentence_tokenizer = None
self._lemmatize = lru_wrap(lemmatizer.lemmatize, lru_cache_size) if lemmatizer else None
self._stem = stemmer.stem if stemmer else None
self._pos_tag = pos_tag
self._replace_char_repeats = \
RepeatReplacer(max_repeats=max_char_repeats).replace \
if max_char_repeats > 0 else self._identity
# translation of Unicode characters
translator = Translator(EXTRA_TRANSLATE_MAP, translated=True)
translator.add_inverse_map(translate_map_inv, translated=False)
self._replace_chars = translator.replace
if html_renderer is None:
self.strip_html = lambda x: x
elif html_renderer == u'default':
self.strip_html = HTMLCleaner().clean
elif html_renderer == u'beautifulsoup':
self.strip_html = strip_html_bs
else:
raise ValueError('Invalid parameter value given for `html_renderer`')
        # tokenize a dummy string because the lemmatizer and/or other tools can
        # take a while to initialize, which would otherwise skew our attempts
        # to measure performance
self.tokenize(u"dummy string")
@staticmethod
def load_sent_tokenizer(sentence_tokenizer, add_abbrev_types=None, del_sent_starters=None):
_sentence_tokenizer = None
_sentence_tokenize = lambda x: [x]
if sentence_tokenizer is not None:
if sentence_tokenizer[0] == 'nltk_data':
punkt = nltk.data.load(sentence_tokenizer[1])
# TODO: why was the (now commented-out) line below here?
# return punkt, punkt.tokenize
return punkt, punkt.sentences_from_text
elif sentence_tokenizer[0] == 'data':
tokenizer_path = os.path.join('..', 'data', sentence_tokenizer[1])
tokenizer_path = resource_filename(__name__, tokenizer_path)
if os.path.exists(tokenizer_path):
with open_gz(tokenizer_path, 'rb') as fhandle:
try:
punkt = pickle.load(fhandle)
except EOFError:
logging.warn("Could not load tokenizer from %s", tokenizer_path)
return _sentence_tokenizer, _sentence_tokenize
if add_abbrev_types:
punkt._params.abbrev_types = punkt._params.abbrev_types | set(add_abbrev_types)
if del_sent_starters:
punkt._params.sent_starters = punkt._params.sent_starters - set(del_sent_starters)
return punkt, punkt.sentences_from_text
else:
logging.warn("Tokenizer not found at %s", tokenizer_path)
else:
raise ValueError("Invalid sentence tokenizer class")
return _sentence_tokenizer, _sentence_tokenize
@staticmethod
def _identity(arg):
return arg
def unicode_normalize(self, text):
# 1. Normalize to specific Unicode form (also replaces ellipsis with
# periods)
text = self._unicode_normalize(text)
# 2. Replace certain chars such as n- and m-dashes
text = self._replace_inplace(text)
return text
def preprocess(self, text, lowercase=True):
# 1. Remove HTML
text = self.strip_html(text)
# 2. Normalize Unicode
text = self.unicode_normalize(text)
# 3. Replace certain characters
text = self._replace_chars(text)
# 4. whiteout URLs
text = self._url_parser.whiteout_urls(text)
# 5. Lowercase
if lowercase:
text = text.lower()
# 6. Reduce repeated characters to specified number (usually 3)
text = self._replace_char_repeats(text)
return text
def word_tokenize(self, text, lowercase=True, preprocess=True, remove_stopwords=False):
# 1. Misc. preprocessing
if preprocess:
text = self.preprocess(text, lowercase=lowercase)
elif lowercase:
text = text.lower()
# 2. Tokenize
words = self._tokenize(text)
# 3. Lemmatize or stem based on POS tags
if self._lemmatize:
final_words = []
lemmatize = self._lemmatize
for word, tag in self._pos_tag(words):
wordnet_tag = treebank2wordnet(tag)
if wordnet_tag is not None:
word = lemmatize(word, pos=wordnet_tag)
final_words.append(word)
words = final_words
elif self._stem:
stem = self._stem
words = [stem(word) for word in words]
# 4. Optionally remove stop words (false by default)
if remove_stopwords:
stop_words = self._stopwords
words = [word for word in words if word not in stop_words]
# 5. Return a list of words
return words
def sentence_tokenize(self, text, preprocess=True,
remove_stopwords=False):
if preprocess:
text = self.preprocess(text, lowercase=False)
sentences = []
for raw_sentence in self._sentence_tokenize(text):
if not raw_sentence:
continue
words = self.word_tokenize(
raw_sentence, preprocess=False, lowercase=True,
remove_stopwords=remove_stopwords)
if not words:
continue
sentences.append(words)
return sentences
tokenize = word_tokenize
def train_sentence_model(self, iterator, verbose=False, show_progress=1000):
reviews = []
for idx, review in enumerate(iterator, start=1):
if show_progress and idx % show_progress == 0:
logging.info("Processing review %d", idx)
review = self.preprocess(review, lowercase=False).strip()
if not review.endswith(u'.'):
review += u'.'
reviews.append(review)
text = u'\n\n'.join(reviews)
custom_lang_vars = PunktLanguageVars
custom_lang_vars.sent_end_chars = ('.', '?', '!')
# TODO: check if we need to manually specify common abbreviations
punkt = PunktTrainer(verbose=verbose, lang_vars=custom_lang_vars())
abbrev_sent = u'Start %s end.' % u' '.join(CONFIG['tokenizer']['add_abbrev_types'])
punkt.train(abbrev_sent, finalize=False)
punkt.train(text, finalize=False)
punkt.finalize_training()
params = punkt.get_params()
if self._sentence_tokenizer:
self._sentence_tokenizer._params = params
else:
model = PunktSentenceTokenizer()
model._params = params
self._sentence_tokenizer = model
self._sentence_tokenize = model.sentences_from_tokens
def train(self, iterator, verbose=False, show_progress=1000):
self.train_sentence_model(iterator, verbose=verbose, show_progress=show_progress)
def save_sentence_model(self, output_file):
pickle.dump(self._sentence_tokenizer, output_file, protocol=pickle.HIGHEST_PROTOCOL)
def registry(key):
"""
    Retrieve lemmatizer/stemmer objects for the keys given in the config.
"""
if key is None:
return None
elif key == 'wordnet':
return wordnet.WordNetLemmatizer()
elif key == 'porter':
return PorterStemmer()
def tokenizer_builder():
return SimpleSentenceTokenizer(
lemmatizer=registry(CONFIG['preprocess']['lemmatizer']),
stemmer=registry(CONFIG['preprocess']['stemmer']),
url_parser=URLParser(),
**CONFIG['tokenizer'])
TOKENIZER = tokenizer_builder()
def get_sentences(field, row, **kwargs):
sentences = []
text = row[field]
for sentence in TOKENIZER.sentence_tokenize(text, **kwargs):
sentences.append(sentence)
row[field] = sentences
return row
def get_words(field, row, **kwargs):
text = row[field]
words = TOKENIZER.tokenize(text, **kwargs)
row[field] = words
return row
def get_review_iterator(args):
iterator = pd_dict_iter(args.input, chunksize=1000)
if args.limit:
iterator = islice(iterator, args.limit)
return iterator
def get_mapper_method(args):
if args.sentences:
mapper = get_sentences
else:
mapper = get_words
return mapper
def run_tokenize(args):
iterator = get_review_iterator(args)
mapper = get_mapper_method(args)
write_record = partial(write_json_line, args.output)
field = args.field
if args.n_jobs == 1:
# turn off parallelism
for row in iterator:
record = mapper(field, row)
write_record(record)
else:
        # enable parallelism
for record in Parallel(n_jobs=args.n_jobs, verbose=10)(
delayed(mapper)(field, row) for row in iterator):
write_record(record)
def train_sentence_tokenizer(args):
field = args.field
iterator = (obj[field] for obj in get_review_iterator(args))
TOKENIZER.train(iterator, verbose=args.verbose)
TOKENIZER.save_sentence_model(args.output)
def parse_args(args=None):
parser = PathArgumentParser()
parser.add_argument('--input', type=GzipFileType('r'), default=[sys.stdin], nargs='*',
help='Input file (in TSV format, optionally compressed)')
parser.add_argument('--field', type=str, default='review',
help='Field name (Default: review)')
parser.add_argument('--limit', type=int, default=None,
help='Only process this many lines (for testing)')
parser.add_argument('--n_jobs', type=int, default=-1,
help="Number of jobs to run")
parser.add_argument('--output', type=GzipFileType('w'), default=sys.stdout,
help='Output file')
subparsers = parser.add_subparsers()
parser_tokenize = subparsers.add_parser('tokenize')
parser_tokenize.add_argument('--sentences', action='store_true',
help='split on sentences')
parser_tokenize.set_defaults(func=run_tokenize)
parser_train = subparsers.add_parser('train')
parser_train.add_argument('--verbose', action='store_true',
help='be verbose')
parser_train.set_defaults(func=train_sentence_tokenizer)
namespace = parser.parse_args(args)
return namespace
def run():
args = parse_args()
args.func(args)
if __name__ == "__main__":
run()
| mit |
fracting/depot_tools | testing_support/auto_stub.py | 53 | 2301 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__version__ = '1.0'
import collections
import inspect
import unittest
class AutoStubMixIn(object):
"""Automatically restores stubbed functions on unit test teardDown.
It's an extremely lightweight mocking class that doesn't require bookeeping.
"""
_saved = None
def mock(self, obj, member, mock):
self._saved = self._saved or collections.OrderedDict()
old_value = self._saved.setdefault(
obj, collections.OrderedDict()).setdefault(member, getattr(obj, member))
setattr(obj, member, mock)
return old_value
def tearDown(self):
"""Restore all the mocked members."""
if self._saved:
for obj, items in self._saved.iteritems():
for member, previous_value in items.iteritems():
setattr(obj, member, previous_value)
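# Usage sketch (illustrative): mixed into a TestCase, mock() swaps a member
# out and tearDown() restores the original automatically.
#
#     class MyTest(auto_stub.TestCase):
#         def test_time(self):
#             import time
#             self.mock(time, 'time', lambda: 123.0)
#             self.assertEqual(123.0, time.time())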
class SimpleMock(object):
"""Really simple manual class mock."""
def __init__(self, unit_test):
"""Do not call __init__ if you want to use the global call list to detect
ordering across different instances.
"""
self.calls = []
self.unit_test = unit_test
self.assertEqual = unit_test.assertEqual
def pop_calls(self):
"""Returns the list of calls up to date.
Good to do self.assertEqual(expected, mock.pop_calls()).
"""
calls = self.calls
self.calls = []
return calls
def check_calls(self, expected):
self.assertEqual(expected, self.pop_calls())
def _register_call(self, *args, **kwargs):
"""Registers the name of the caller function."""
caller_name = kwargs.pop('caller_name', None) or inspect.stack()[1][3]
str_args = ', '.join(repr(arg) for arg in args)
str_kwargs = ', '.join('%s=%r' % (k, v) for k, v in kwargs.iteritems())
self.calls.append('%s(%s)' % (
caller_name, ', '.join(filter(None, [str_args, str_kwargs]))))
class TestCase(unittest.TestCase, AutoStubMixIn):
"""Adds self.mock() and self.has_failed() to a TestCase."""
def tearDown(self):
AutoStubMixIn.tearDown(self)
unittest.TestCase.tearDown(self)
def has_failed(self):
"""Returns True if the test has failed."""
return not self._resultForDoCleanups.wasSuccessful()
| bsd-3-clause |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/python-com-scrapy/Lib/encodings/koi8_t.py | 101 | 13193 | """ Python Character Mapping Codec koi8_t
"""
# http://ru.wikipedia.org/wiki/КОИ-8
# http://www.opensource.apple.com/source/libiconv/libiconv-4/libiconv/tests/KOI8-T.TXT
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='koi8-t',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u049b' # 0x80 -> CYRILLIC SMALL LETTER KA WITH DESCENDER
'\u0493' # 0x81 -> CYRILLIC SMALL LETTER GHE WITH STROKE
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0492' # 0x83 -> CYRILLIC CAPITAL LETTER GHE WITH STROKE
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\u04b3' # 0x8A -> CYRILLIC SMALL LETTER HA WITH DESCENDER
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u04b2' # 0x8C -> CYRILLIC CAPITAL LETTER HA WITH DESCENDER
'\u04b7' # 0x8D -> CYRILLIC SMALL LETTER CHE WITH DESCENDER
'\u04b6' # 0x8E -> CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
'\ufffe' # 0x8F -> UNDEFINED
'\u049a' # 0x90 -> CYRILLIC CAPITAL LETTER KA WITH DESCENDER
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\ufffe' # 0xA0 -> UNDEFINED
'\u04ef' # 0xA1 -> CYRILLIC SMALL LETTER U WITH MACRON
'\u04ee' # 0xA2 -> CYRILLIC CAPITAL LETTER U WITH MACRON
'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u04e3' # 0xA5 -> CYRILLIC SMALL LETTER I WITH MACRON
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\ufffe' # 0xA8 -> UNDEFINED
'\ufffe' # 0xA9 -> UNDEFINED
'\ufffe' # 0xAA -> UNDEFINED
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\ufffe' # 0xAF -> UNDEFINED
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
'\ufffe' # 0xB4 -> UNDEFINED
'\u04e2' # 0xB5 -> CYRILLIC CAPITAL LETTER I WITH MACRON
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\ufffe' # 0xB8 -> UNDEFINED
'\u2116' # 0xB9 -> NUMERO SIGN
'\ufffe' # 0xBA -> UNDEFINED
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\ufffe' # 0xBC -> UNDEFINED
'\ufffe' # 0xBD -> UNDEFINED
'\ufffe' # 0xBE -> UNDEFINED
'\xa9' # 0xBF -> COPYRIGHT SIGN
'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
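# Round-trip sketch (illustrative): once this codec is registered by the
# encodings machinery, it behaves like any other charmap codec.
#
#     '\u0416'.encode('koi8-t')   # -> b'\xf6' (CYRILLIC CAPITAL LETTER ZHE)
#     b'\xf6'.decode('koi8-t')    # -> '\u0416'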
| gpl-3.0 |
sloede/modm | modfileparser.py | 1 | 6189 | #!/usr/bin/env python
# Modm - Modules iMproved
# Copyright (C) 2013-2014 Michael Schlottke
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# System imports
import os
import shlex
# Project imports
from env import Env,EnvVariable
from basheval import BashEval
class ModfileParser:
"""
Class to parse module files and execute commands found in them.
"""
backup_prefix = 'MODM_BACKUP_'
def __init__(self, env=Env(), basheval=BashEval()):
"""Save arguments to class and initialize list of valid commands.
Arguments:
env -- object to handle environment variables
basheval -- object to convert commands to Bash evaluation strings
"""
# Save arguments
self.env = env
self.be = basheval
# Init commands
self.commands = dict()
self.init_commands()
# Init other members
self.do_unload = False
def init_commands(self):
"""Initialize all commands that are supported in module files."""
self.commands['prepend_path'] = lambda *x: self.cmd_prepend_variable(
*x,
kind='path')
self.commands['prepend_string'] = lambda *x: self.cmd_prepend_variable(
*x,
kind='string')
self.commands['print'] = self.cmd_print
self.commands['print_load'] = lambda *x: self.cmd_print(
*x,
unload=False)
self.commands['print_unload'] = lambda *x: self.cmd_print(
*x,
load=False)
self.commands['set'] = self.cmd_set
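    # Example module file accepted by the commands registered above
    # (illustrative; the variable names and paths are hypothetical):
    #
    #     prepend_path PATH /opt/tool/bin
    #     set TOOL_HOME /opt/tool
    #     print_load "loading tool"
    #     print_unload "unloading tool"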
def cmd_prepend_variable(self, name, value, kind='string'):
"""Prepend variable `name` with `value`."""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name, kind=kind)
# Prepend value (or undo prepend)
self.env.variables[name].prepend(value, undo=self.do_unload)
def cmd_append_variable(self, name, value, kind='string'):
"""Append variable `name` with `value`."""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name, kind=kind)
# Append value (or undo append)
self.env.variables[name].append(value, undo=self.do_unload)
def cmd_print(self, message, load=True, unload=True):
"""Print `message`."""
if (load and not self.do_unload) or (unload and self.do_unload):
self.be.echo(message)
def cmd_set(self, name, value):
"""Set variable `name` to `value`.
Save backup of `name` if it exists already, and restore the
original value upon unloading.
"""
# Create variable if it does not exist yet
if not name in self.env.variables:
self.env.variables[name] = EnvVariable(name)
# Determine name of potential backup variable and create backup variable
# if it does not exist
backupname = self.backup_prefix + name
if backupname not in self.env.variables:
self.env.variables[backupname] = EnvVariable(backupname)
# If variable is to be set, check if it is already set and save backup
if not self.do_unload:
if self.env.variables[name].is_set():
self.env.variables[backupname].set_value(
self.env.variables[name].get_value())
self.env.variables[name].set_value(value)
# If variable is to be unset, check if backup variable exists and
# restore it
else:
if self.env.variables[backupname].is_set():
self.env.variables[name].set_value(
self.env.variables[backupname].get_value())
self.env.variables[backupname].unset()
else:
self.env.variables[name].unset()
def load(self, modfile):
"""Load module file `modfile`."""
self.do_unload = False
return self.parse(modfile)
def unload(self, modfile):
"""Unload module file `modfile`."""
self.do_unload = True
return self.parse(modfile)
def parse(self, modfile):
"""Parse module file `modfile` and execute commands that are found.
Return true if parsing was successful, otherwise false."""
        # Return False without doing anything if the file is not found
        if not os.path.isfile(modfile):
            return False
# Read module file
with open(modfile, 'r') as f:
lines = f.readlines()
# Try to parse each line into shell tokens or die
try:
splitlines = [shlex.split(line) for line in lines]
except Exception as e:
self.be.error("Bad syntax in module file '{mf}': {e} ({n})".format(
mf=modfile, e=e, n=type(e).__name__))
return False
        # Parse each line individually
for tokens in splitlines:
# Skip line if there were no tokens
if len(tokens) == 0:
continue
# First token is command, rest (if existing) are arguments
cmd = tokens[0]
args = tokens[1:]
# If command exists, execute it while providing the arguments from
# the file
if cmd in self.commands:
self.commands[cmd](*args)
# Return true to indicate that nothing was wrong
return True
| gpl-2.0 |
tambetm/neon | neon/optimizers/learning_rule.py | 2 | 2780 | # ----------------------------------------------------------------------------
# Copyright 2014 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Generic parent class used to control how updates are applied to coefficients,
i.e. how the learning should proceed.
"""
from neon.util.param import opt_param
import logging
import numpy as np
logger = logging.getLogger(__name__)
class LearningRule(object):
"""
Base object for applying learning rule on the parameters to be updated
Attributes:
name (str): Used to identify this LearningRule when logging.
batch_size (int): Number of examples presented at this iteration
"""
def __init__(self, name, lr_params):
self.name = name
opt_param(self, ['velocity_dtype', 'param_dtype', 'gradient_dtype'],
np.float32)
def initialize(self, backend):
self.backend = backend
def __str__(self):
be_nm = ''
if hasattr(self, 'backend'):
be_nm = ", utilizing {} backend".format(
self.backend.__class__.__name__)
return ("LearningRule {upd_nm}: {upd_tp} upd_rl{be_nm}\n\t".format(
upd_nm=self.name, upd_tp=self.__class__.__name__, be_nm=be_nm))
def allocate_state(self, params):
pass
def set_pretrain_mode(self, pretrain_mode):
pass
def apply_rule(self, params, updates, epoch):
raise NotImplementedError()
def get_params(self):
np_params = dict()
for p in self.param_names:
if hasattr(self, p):
p_list = getattr(self, p)
plr = p + self.name
np_params[plr] = []
for p_tensor in p_list:
np_params[plr].append(np.array(
p_tensor.asnumpyarray(), dtype=p_tensor.dtype).reshape(
p_tensor.shape))
return np_params
def set_params(self, params_dict):
for p in self.param_names:
plr = p + self.name
if plr in params_dict:
for i in range(len(params_dict[plr])):
getattr(self, p)[i][:] = params_dict[plr][i]
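# Minimal subclass sketch (illustrative, not part of neon): a plain SGD rule.
# It assumes the backend provides elementwise multiply/subtract with an `out`
# argument; `learning_rate` is a hypothetical lr_params key.
#
#     class SimpleSGD(LearningRule):
#         def __init__(self, name, lr_params):
#             super(SimpleSGD, self).__init__(name, lr_params)
#             self.learning_rate = lr_params.get('learning_rate', 0.01)
#
#         def apply_rule(self, params, updates, epoch):
#             for param, update in zip(params, updates):
#                 self.backend.multiply(update, self.learning_rate, out=update)
#                 self.backend.subtract(param, update, out=param)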
| apache-2.0 |
cp16net/trove | trove/tests/unittests/guestagent/test_dbaas.py | 1 | 144181 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import os
import subprocess
import tempfile
import time
from uuid import uuid4
from mock import ANY
from mock import DEFAULT
from mock import MagicMock
from mock import Mock
from mock import patch
from mock import PropertyMock
from oslo_utils import netutils
import sqlalchemy
import testtools
from testtools.matchers import Equals
from testtools.matchers import Is
from testtools.matchers import Not
from trove.common import cfg
from trove.common.exception import BadRequest
from trove.common.exception import GuestError
from trove.common.exception import PollTimeOut
from trove.common.exception import ProcessExecutionError
from trove.common import instance as rd_instance
from trove.common import utils
from trove.conductor import api as conductor_api
from trove.guestagent.common import operating_system
from trove.guestagent.common.operating_system import FileMode
from trove.guestagent.datastore.experimental.cassandra import (
service as cass_service)
from trove.guestagent.datastore.experimental.cassandra import (
system as cass_system)
from trove.guestagent.datastore.experimental.couchbase import (
service as couchservice)
from trove.guestagent.datastore.experimental.couchdb import (
service as couchdb_service)
from trove.guestagent.datastore.experimental.db2 import (
service as db2service)
from trove.guestagent.datastore.experimental.mongodb import (
service as mongo_service)
from trove.guestagent.datastore.experimental.mongodb import (
system as mongo_system)
from trove.guestagent.datastore.experimental.redis import service as rservice
from trove.guestagent.datastore.experimental.redis.service import RedisApp
from trove.guestagent.datastore.experimental.redis import system as RedisSystem
from trove.guestagent.datastore.experimental.vertica import (
system as vertica_system)
from trove.guestagent.datastore.experimental.vertica.service import (
VerticaAppStatus)
from trove.guestagent.datastore.experimental.vertica.service import VerticaApp
import trove.guestagent.datastore.mysql.service as dbaas
from trove.guestagent.datastore.mysql.service import KeepAliveConnection
from trove.guestagent.datastore.mysql.service import MySqlAdmin
from trove.guestagent.datastore.mysql.service import MySqlApp
from trove.guestagent.datastore.mysql.service import MySqlAppStatus
from trove.guestagent.datastore.mysql.service import MySqlRootAccess
from trove.guestagent.datastore.service import BaseDbStatus
from trove.guestagent.db import models
from trove.guestagent import dbaas as dbaas_sr
from trove.guestagent.dbaas import get_filesystem_volume_stats
from trove.guestagent.dbaas import to_gb
from trove.guestagent import pkg
from trove.guestagent.volume import VolumeDevice
from trove.instance.models import InstanceServiceStatus
from trove.tests.unittests.util import util
CONF = cfg.CONF
"""
Unit tests for the classes and functions in dbaas.py.
"""
FAKE_DB = {"_name": "testDB", "_character_set": "latin2",
"_collate": "latin2_general_ci"}
FAKE_DB_2 = {"_name": "testDB2", "_character_set": "latin2",
"_collate": "latin2_general_ci"}
FAKE_USER = [{"_name": "random", "_password": "guesswhat",
"_host": "%", "_databases": [FAKE_DB]}]
conductor_api.API.get_client = Mock()
conductor_api.API.heartbeat = Mock()
class FakeAppStatus(BaseDbStatus):
def __init__(self, id, status):
self.id = id
self.next_fake_status = status
def _get_actual_db_status(self):
return self.next_fake_status
def set_next_status(self, next_status):
self.next_fake_status = next_status
def _is_query_router(self):
return False
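# FakeAppStatus lets a test script the status the guest agent will
# observe next; a typical (illustrative) sequence:
#
#     status = FakeAppStatus(fake_id, rd_instance.ServiceStatuses.NEW)
#     status.set_next_status(rd_instance.ServiceStatuses.RUNNING)
#     assert (status._get_actual_db_status() ==
#             rd_instance.ServiceStatuses.RUNNING)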
class DbaasTest(testtools.TestCase):
def setUp(self):
super(DbaasTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_utils_execute = dbaas.utils.execute
def tearDown(self):
super(DbaasTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.utils.execute = self.orig_utils_execute
@patch.object(operating_system, 'remove')
def test_clear_expired_password(self, mock_remove):
secret_content = ("# The random password set for the "
"root user at Wed May 14 14:06:38 2014 "
"(local time): somepassword")
with patch.object(dbaas.utils, 'execute',
return_value=(secret_content, None)):
dbaas.clear_expired_password()
self.assertEqual(2, dbaas.utils.execute.call_count)
self.assertEqual(1, mock_remove.call_count)
@patch.object(operating_system, 'remove')
def test_no_secret_content_clear_expired_password(self, mock_remove):
with patch.object(dbaas.utils, 'execute', return_value=('', None)):
dbaas.clear_expired_password()
self.assertEqual(1, dbaas.utils.execute.call_count)
mock_remove.assert_not_called()
@patch.object(operating_system, 'remove')
def test_fail_password_update_content_clear_expired_password(self,
mock_remove):
secret_content = ("# The random password set for the "
"root user at Wed May 14 14:06:38 2014 "
"(local time): somepassword")
with patch.object(dbaas.utils, 'execute',
side_effect=[(secret_content, None),
ProcessExecutionError]):
dbaas.clear_expired_password()
self.assertEqual(2, dbaas.utils.execute.call_count)
mock_remove.assert_not_called()
@patch.object(operating_system, 'remove')
@patch.object(dbaas.utils, 'execute', side_effect=ProcessExecutionError)
def test_fail_retrieve_secret_content_clear_expired_password(self,
mock_execute,
mock_remove):
dbaas.clear_expired_password()
self.assertEqual(1, mock_execute.call_count)
mock_remove.assert_not_called()
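# Taken together, the four cases above pin down the
# clear_expired_password() flow: read the generated secret file, reset
# the root password with the recovered value, and remove the file only
# when both steps succeed.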
def test_get_auth_password(self):
dbaas.utils.execute_with_timeout = Mock(
return_value=("password ", None))
password = dbaas.get_auth_password()
self.assertEqual("password", password)
def test_get_auth_password_error(self):
dbaas.utils.execute_with_timeout = Mock(
return_value=("password", "Error"))
self.assertRaises(RuntimeError, dbaas.get_auth_password)
def test_service_discovery(self):
with patch.object(os.path, 'isfile', return_value=True):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.assertIsNotNone(mysql_service['cmd_start'])
self.assertIsNotNone(mysql_service['cmd_enable'])
def test_load_mysqld_options(self):
output = "mysqld would've been started with the these args:\n"\
"--user=mysql --port=3306 --basedir=/usr "\
"--tmpdir=/tmp --skip-external-locking"
with patch.object(os.path, 'isfile', return_value=True):
dbaas.utils.execute = Mock(return_value=(output, None))
options = dbaas.load_mysqld_options()
self.assertEqual(5, len(options))
self.assertEqual(["mysql"], options["user"])
self.assertEqual(["3306"], options["port"])
self.assertEqual(["/usr"], options["basedir"])
self.assertEqual(["/tmp"], options["tmpdir"])
self.assertTrue("skip-external-locking" in options)
def test_load_mysqld_options_contains_plugin_loads_options(self):
output = ("mysqld would've been started with the these args:\n"
"--plugin-load=blackhole=ha_blackhole.so "
"--plugin-load=federated=ha_federated.so")
with patch.object(os.path, 'isfile', return_value=True):
dbaas.utils.execute = Mock(return_value=(output, None))
options = dbaas.load_mysqld_options()
self.assertEqual(1, len(options))
self.assertEqual(["blackhole=ha_blackhole.so",
"federated=ha_federated.so"],
options["plugin-load"])
@patch.object(os.path, 'isfile', return_value=True)
def test_load_mysqld_options_error(self, mock_exists):
dbaas.utils.execute = Mock(side_effect=ProcessExecutionError())
self.assertFalse(dbaas.load_mysqld_options())
def test_get_datadir(self):
cnf_value = '[mysqld]\ndatadir=/var/lib/mysql/data'
with patch.object(dbaas, 'read_mycnf', Mock(return_value=cnf_value)):
self.assertEqual('/var/lib/mysql/data',
dbaas.get_datadir(reset_cache=True))
class ResultSetStub(object):
def __init__(self, rows):
self._rows = rows
def __iter__(self):
return self._rows.__iter__()
@property
def rowcount(self):
return len(self._rows)
def __repr__(self):
return self._rows.__repr__()
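# ResultSetStub mimics just enough of a SQLAlchemy result proxy
# (iteration and rowcount) for the assertions below; for instance:
#
#     rs = ResultSetStub([('db1', 'utf8', 'utf8_bin')])
#     assert rs.rowcount == 1 and list(rs)[0][0] == 'db1'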
class MySqlAdminMockTest(testtools.TestCase):
def tearDown(self):
super(MySqlAdminMockTest, self).tearDown()
def test_list_databases(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute',
return_value=ResultSetStub(
[('db1', 'utf8', 'utf8_bin'),
('db2', 'utf8', 'utf8_bin'),
('db3', 'utf8', 'utf8_bin')])):
databases, next_marker = MySqlAdmin().list_databases(limit=10)
self.assertThat(next_marker, Is(None))
self.assertThat(len(databases), Is(3))
class MySqlAdminTest(testtools.TestCase):
def setUp(self):
super(MySqlAdminTest, self).setUp()
self.orig_get_engine = dbaas.get_engine
self.orig_LocalSqlClient = dbaas.LocalSqlClient
self.orig_LocalSqlClient_enter = dbaas.LocalSqlClient.__enter__
self.orig_LocalSqlClient_exit = dbaas.LocalSqlClient.__exit__
self.orig_LocalSqlClient_execute = dbaas.LocalSqlClient.execute
self.orig_MySQLUser_is_valid_user_name = (
models.MySQLUser._is_valid_user_name)
dbaas.get_engine = MagicMock(name='get_engine')
dbaas.LocalSqlClient = Mock
dbaas.LocalSqlClient.__enter__ = Mock()
dbaas.LocalSqlClient.__exit__ = Mock()
dbaas.LocalSqlClient.execute = Mock()
self.mySqlAdmin = MySqlAdmin()
def tearDown(self):
super(MySqlAdminTest, self).tearDown()
dbaas.get_engine = self.orig_get_engine
dbaas.LocalSqlClient = self.orig_LocalSqlClient
dbaas.LocalSqlClient.__enter__ = self.orig_LocalSqlClient_enter
dbaas.LocalSqlClient.__exit__ = self.orig_LocalSqlClient_exit
dbaas.LocalSqlClient.execute = self.orig_LocalSqlClient_execute
models.MySQLUser._is_valid_user_name = (
self.orig_MySQLUser_is_valid_user_name)
def test__associate_dbs(self):
db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
{"grantee": "'test_user'@'%'", "table_schema": "db2"},
{"grantee": "'test_user'@'%'", "table_schema": "db3"},
{"grantee": "'test_user1'@'%'", "table_schema": "db1"},
{"grantee": "'test_user1'@'%'", "table_schema": "db3"}]
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.databases = []
expected = ("SELECT grantee, table_schema FROM "
"information_schema.SCHEMA_PRIVILEGES WHERE privilege_type"
" != 'USAGE' GROUP BY grantee, table_schema;")
with patch.object(dbaas.LocalSqlClient, 'execute',
Mock(return_value=db_result)):
self.mySqlAdmin._associate_dbs(user)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
self.assertEqual(3, len(user.databases))
self.assertEqual(expected, args[0].text,
"Associate database queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_change_passwords(self):
user = [{"name": "test_user", "host": "%", "password": "password"}]
self.mySqlAdmin.change_passwords(user)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("UPDATE mysql.user SET Password="
"PASSWORD('password') WHERE User = 'test_user' "
"AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Change password queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_password(self):
db_result = [{"grantee": "'test_user'@'%'", "table_schema": "db1"},
{"grantee": "'test_user'@'%'", "table_schema": "db2"}]
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"password": "password"}
with patch.object(dbaas.LocalSqlClient, 'execute',
Mock(return_value=db_result)):
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%',
user_attrs)
self.assertEqual(0,
self.mySqlAdmin.grant_access.call_count)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET Password="
"PASSWORD('password') WHERE User = 'test_user' "
"AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_name(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"name": "new_name"}
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%', user_attrs)
self.mySqlAdmin.grant_access.assert_called_with(
'new_name', '%', set([]))
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET User='new_name' "
"WHERE User = 'test_user' AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_update_attributes_host(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user_attrs = {"host": "new_host"}
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
with patch.object(self.mySqlAdmin, 'grant_access'):
self.mySqlAdmin.update_attributes('test_user', '%', user_attrs)
self.mySqlAdmin.grant_access.assert_called_with(
'test_user', 'new_host', set([]))
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("UPDATE mysql.user SET Host='new_host' "
"WHERE User = 'test_user' AND Host = '%';")
self.assertEqual(expected, args[0].text,
"Update attributes queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_create_database(self):
databases = []
databases.append(FAKE_DB)
self.mySqlAdmin.create_database(databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
self.assertEqual(1, dbaas.LocalSqlClient.execute.call_count,
"The client object was not called exactly once, " +
"it was called %d times"
% dbaas.LocalSqlClient.execute.call_count)
def test_create_database_more_than_1(self):
databases = []
databases.append(FAKE_DB)
databases.append(FAKE_DB_2)
self.mySqlAdmin.create_database(databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
args, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
expected = ("CREATE DATABASE IF NOT EXISTS "
"`testDB2` CHARACTER SET = 'latin2' "
"COLLATE = 'latin2_general_ci';")
self.assertEqual(expected, args[0].text,
"Create database queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count,
"The client object was not called exactly twice, " +
"it was called %d times"
% dbaas.LocalSqlClient.execute.call_count)
def test_create_database_no_db(self):
databases = []
self.mySqlAdmin.create_database(databases)
self.assertFalse(dbaas.LocalSqlClient.execute.called,
"The client object was called when it wasn't " +
"supposed to")
def test_delete_database(self):
database = {"_name": "testDB"}
self.mySqlAdmin.delete_database(database)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = "DROP DATABASE `testDB`;"
self.assertEqual(expected, args[0].text,
"Delete database queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_delete_user(self):
user = {"_name": "testUser", "_host": None}
self.mySqlAdmin.delete_user(user)
# call_args can be None if execute() was never invoked with
# positional args, so only assert on the query text when a call
# was actually recorded.
call_args = dbaas.LocalSqlClient.execute.call_args
if call_args is not None:
args, _ = call_args
expected = "DROP USER `testUser`@`%`;"
self.assertEqual(expected, args[0].text,
"Delete user queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_create_user(self):
self.mySqlAdmin.create_user(FAKE_USER)
access_grants_expected = ("GRANT ALL PRIVILEGES ON `testDB`.* TO "
"`random`@`%` IDENTIFIED BY 'guesswhat';")
create_user_expected = ("GRANT USAGE ON *.* TO `random`@`%` "
"IDENTIFIED BY 'guesswhat';")
create_user, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
self.assertEqual(create_user_expected, create_user[0].text,
"Create user queries are not the same")
access_grants, _ = dbaas.LocalSqlClient.execute.call_args_list[1]
self.assertEqual(access_grants_expected, access_grants[0].text,
"Create user queries are not the same")
self.assertEqual(2, dbaas.LocalSqlClient.execute.call_count)
def test_list_databases(self):
self.mySqlAdmin.list_databases()
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
def test_list_databases_with_limit(self):
limit = 2
self.mySqlAdmin.list_databases(limit)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertTrue("LIMIT " + str(limit + 1) in args[0].text)
def test_list_databases_with_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_databases(marker=marker)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND schema_name > '" + marker + "'" in args[0].text)
def test_list_databases_with_include_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_databases(marker=marker, include_marker=True)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT schema_name as name,",
"default_character_set_name as charset,",
"default_collation_name as collation",
"FROM information_schema.schemata",
("schema_name NOT IN ('" + "', '".join(CONF.ignore_dbs) +
"')"),
"ORDER BY schema_name ASC",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue(("AND schema_name >= '%s'" % marker) in args[0].text)
def test_list_users(self):
self.mySqlAdmin.list_users()
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertFalse("AND Marker > '" in args[0].text)
def test_list_users_with_limit(self):
limit = 2
self.mySqlAdmin.list_users(limit)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
("LIMIT " + str(limit + 1)),
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
def test_list_users_with_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_users(marker=marker)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host, Marker",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND Marker > '" + marker + "'" in args[0].text)
def test_list_users_with_include_marker(self):
marker = "aMarker"
self.mySqlAdmin.list_users(marker=marker, include_marker=True)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost'",
"ORDER BY User",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertFalse("LIMIT " in args[0].text)
self.assertTrue("AND Marker >= '" + marker + "'" in args[0].text)
@patch.object(dbaas.MySqlAdmin, '_associate_dbs')
def test_get_user(self, mock_associate_dbs):
"""
Unit tests for mySqlAdmin.get_user.
This test case checks if the sql query formed by the get_user method
is correct or not by checking with expected query.
"""
username = "user1"
hostname = "%"
user = [{"User": "user1", "Host": "%", 'Password': 'some_thing'}]
dbaas.LocalSqlClient.execute.return_value.fetchall = Mock(
return_value=user)
self.mySqlAdmin.get_user(username, hostname)
args, _ = dbaas.LocalSqlClient.execute.call_args
expected = ["SELECT User, Host",
"FROM mysql.user",
"WHERE Host != 'localhost' AND User = 'user1'",
"ORDER BY User, Host",
]
for text in expected:
self.assertTrue(text in args[0].text, "%s not in query." % text)
self.assertEqual(1, mock_associate_dbs.call_count)
def test_fail_get_user(self):
username = "os_admin"
hostname = "host"
self.assertRaisesRegexp(BadRequest, "Username os_admin is not valid",
self.mySqlAdmin.get_user, username, hostname)
def test_grant_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['db1']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.grant_access('test_user', '%', databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("GRANT ALL PRIVILEGES ON `db1`.* TO `test_user`@`%` "
"IDENTIFIED BY PASSWORD 'some_password';")
self.assertEqual(expected, args[0].text,
"Grant access queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_fail_grant_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['mysql']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.grant_access('test_user', '%', databases)
# 'mysql' is a system database that access must not be granted to,
# so verify that grant_access never reached the execute() call.
dbaas.LocalSqlClient.execute.assert_not_called()
def test_is_root_enabled(self):
self.mySqlAdmin.is_root_enabled()
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("SELECT User FROM mysql.user WHERE "
"User = 'root' AND Host != 'localhost';")
self.assertEqual(expected, args[0].text,
"Find root enabled queries are not the same")
self.assertTrue(dbaas.LocalSqlClient.execute.called,
"The client object was not called")
def test_revoke_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.password = 'some_password'
databases = ['db1']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
self.mySqlAdmin.revoke_access('test_usr', '%', databases)
args, _ = dbaas.LocalSqlClient.execute.call_args_list[0]
expected = ("REVOKE ALL ON `['db1']`.* FROM `test_user`@`%`;")
self.assertEqual(expected, args[0].text,
"Revoke access queries are not the same")
def test_list_access(self):
user = MagicMock()
user.name = "test_user"
user.host = "%"
user.databases = ['db1', 'db2']
with patch.object(self.mySqlAdmin, '_get_user', return_value=user):
databases = self.mySqlAdmin.list_access('test_usr', '%')
self.assertEqual(2, len(databases),
"List access queries are not the same")
class MySqlAppTest(testtools.TestCase):
def setUp(self):
super(MySqlAppTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_time_sleep = time.sleep
self.orig_unlink = os.unlink
self.orig_get_auth_password = dbaas.get_auth_password
self.orig_service_discovery = operating_system.service_discovery
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mySqlApp = MySqlApp(self.appStatus)
mysql_service = {'cmd_start': Mock(),
'cmd_stop': Mock(),
'cmd_enable': Mock(),
'cmd_disable': Mock(),
'bin': Mock()}
operating_system.service_discovery = Mock(
return_value=mysql_service)
time.sleep = Mock()
os.unlink = Mock()
dbaas.get_auth_password = Mock()
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
time.sleep = self.orig_time_sleep
os.unlink = self.orig_unlink
operating_system.service_discovery = self.orig_service_discovery
dbaas.get_auth_password = self.orig_get_auth_password
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def mysql_starts_successfully(self):
def start(update_db=False):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql.side_effect = start
def mysql_starts_unsuccessfully(self):
def start():
raise RuntimeError("MySQL failed to start!")
self.mySqlApp.start_mysql.side_effect = start
def mysql_stops_successfully(self):
def stop():
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db.side_effect = stop
def mysql_stops_unsuccessfully(self):
def stop():
raise RuntimeError("MySQL failed to stop!")
self.mySqlApp.stop_db.side_effect = stop
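# The four helpers above wire start_mysql()/stop_db() side effects so a
# test can simulate any mix of clean and failed state transitions
# without touching a real mysqld; the restart tests below combine them.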
def test_stop_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
@patch.object(utils, 'execute_with_timeout')
def test_stop_mysql_do_not_start_on_reboot(self, mock_execute):
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mySqlApp.stop_db(True, True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
self.assertEqual(2, mock_execute.call_count)
def test_stop_mysql_error(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mySqlApp.stop_db)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_stop_mysql_key_error(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp.stop_db)
self.assertEqual(0, mock_execute.call_count)
def test_restart_is_successful(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
self.mySqlApp.restart()
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_restart_mysql_wont_start_up(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mysql_stops_unsuccessfully()
self.mysql_starts_unsuccessfully()
self.assertRaises(RuntimeError, self.mySqlApp.restart)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_wipe_ib_logfiles_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError,
self.mySqlApp.wipe_ib_logfiles)
def test_start_mysql(self):
dbaas.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.start_mysql()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_mysql_with_db_update(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mySqlApp.start_mysql(update_db=True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
def test_start_mysql_runs_forever(self):
dbaas.utils.execute_with_timeout = Mock()
self.mySqlApp._enable_mysql_on_boot = Mock()
self.mySqlApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_mysql_error(self):
self.mySqlApp._enable_mysql_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mySqlApp.start_mysql)
def test_start_db_with_conf_changes(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_starts_successfully()
self.appStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.mySqlApp.start_db_with_conf_changes(Mock())
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assertEqual(rd_instance.ServiceStatuses.RUNNING,
self.appStatus._get_actual_db_status())
def test_start_db_with_conf_changes_mysql_is_running(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp._write_mycnf = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mySqlApp.start_db_with_conf_changes,
Mock())
def test_remove_overrides(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
dbaas.utils.execute_with_timeout = mocked
self.assertRaises(ProcessExecutionError, self.mySqlApp.start_mysql)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'remove')
@patch.object(dbaas, 'get_auth_password', return_value='some_password')
@patch.object(dbaas.MySqlApp, '_write_config_overrides')
def test_reset_configuration(self, mock_write_overrides,
mock_get_auth_password, mock_remove,
mock_move):
configuration = {'config_contents': 'some junk'}
self.mySqlApp.reset_configuration(configuration=configuration)
self.assertEqual(1, mock_get_auth_password.call_count)
self.assertEqual(2, mock_move.call_count)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(0, mock_write_overrides.call_count)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'remove')
@patch.object(dbaas.MySqlApp, '_write_config_overrides')
def test__write_mycnf(self, mock_write_overrides, mock_remove, mock_move):
self.mySqlApp._write_mycnf('some_password', 'some junk', 'something')
self.assertEqual(2, mock_move.call_count)
self.assertEqual(2, mock_remove.call_count)
self.assertEqual(1, mock_write_overrides.call_count)
def test_mysql_error_in_write_config_verify_unlink(self):
configuration = {'config_contents': 'some junk'}
dbaas.utils.execute_with_timeout = (
Mock(side_effect=ProcessExecutionError('something')))
self.assertRaises(ProcessExecutionError,
self.mySqlApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, dbaas.utils.execute_with_timeout.call_count)
self.assertEqual(1, os.unlink.call_count)
self.assertEqual(1, dbaas.get_auth_password.call_count)
def test_mysql_error_in_write_config(self):
configuration = {'config_contents': 'some junk'}
dbaas.utils.execute_with_timeout = (
Mock(side_effect=ProcessExecutionError('something')))
self.assertRaises(ProcessExecutionError,
self.mySqlApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, dbaas.utils.execute_with_timeout.call_count)
self.assertEqual(1, dbaas.get_auth_password.call_count)
@patch.object(utils, 'execute_with_timeout')
def test__enable_mysql_on_boot(self, mock_execute):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.mySqlApp._enable_mysql_on_boot()
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(mysql_service['cmd_enable'],
shell=True)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_fail__enable_mysql_on_boot(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._enable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(utils, 'execute_with_timeout')
def test__disable_mysql_on_boot(self, mock_execute):
mysql_service = dbaas.operating_system.service_discovery(["mysql"])
self.mySqlApp._disable_mysql_on_boot()
self.assertEqual(1, mock_execute.call_count)
mock_execute.assert_called_with(mysql_service['cmd_disable'],
shell=True)
@patch.object(operating_system, 'service_discovery',
side_effect=KeyError('error'))
@patch.object(utils, 'execute_with_timeout')
def test_fail__disable_mysql_on_boot(self, mock_execute, mock_service):
self.assertRaisesRegexp(RuntimeError, 'Service is not discovered.',
self.mySqlApp._disable_mysql_on_boot)
self.assertEqual(0, mock_execute.call_count)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'chmod')
@patch.object(utils, 'execute_with_timeout')
def test_update_overrides(self, mock_execute, mock_chmod, mock_move):
override_value = 'something'
self.mySqlApp.update_overrides(override_value)
with open(dbaas.MYCNF_OVERRIDES_TMP, 'r') as test_file:
test_data = test_file.read()
self.assertEqual(override_value, test_data)
mock_chmod.assert_called_with(dbaas.MYCNF_OVERRIDES,
dbaas.FileMode.SET_GRP_RW_OTH_R,
as_root=True)
mock_move.assert_called_with(dbaas.MYCNF_OVERRIDES_TMP,
dbaas.MYCNF_OVERRIDES, as_root=True)
# Remove the residual file
os.remove(dbaas.MYCNF_OVERRIDES_TMP)
@patch.object(os.path, 'exists', return_value=True)
@patch.object(operating_system, 'remove')
def test_remove_override(self, mock_remove, mock_exists):
self.mySqlApp.remove_overrides()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(1, mock_exists.call_count)
mock_remove.assert_called_once_with(ANY, as_root=True)
@patch.object(operating_system, 'move')
@patch.object(operating_system, 'chmod')
def test_write_replication_source_overrides(self, mock_chmod, mock_move):
self.mySqlApp.write_replication_source_overrides('something')
self.assertEqual(1, mock_move.call_count)
self.assertEqual(1, mock_chmod.call_count)
@patch.object(dbaas.MySqlApp, '_write_replication_overrides')
def test_write_replication_replica_overrides(self, mock_write_overrides):
self.mySqlApp.write_replication_replica_overrides('something')
self.assertEqual(1, mock_write_overrides.call_count)
@patch.object(os.path, 'exists', return_value=True)
@patch.object(operating_system, 'remove')
def test_remove_replication_source_overrides(self, mock_remove,
mock_exists):
self.mySqlApp.remove_replication_source_overrides()
self.assertEqual(1, mock_remove.call_count)
self.assertEqual(1, mock_exists.call_count)
@patch.object(dbaas.MySqlApp, '_remove_replication_overrides')
def test_remove_replication_replica_overrides(self, mock_remove_overrides):
self.mySqlApp.remove_replication_replica_overrides()
self.assertEqual(1, mock_remove_overrides.call_count)
@patch.object(os.path, 'exists', return_value=True)
def test_exists_replication_source_overrides(self, mock_exists):
self.assertTrue(self.mySqlApp.exists_replication_source_overrides())
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_grant_replication_privilege(self, *args):
replication_user = {'name': 'testUSr', 'password': 'somePwd'}
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.grant_replication_privilege(replication_user)
args, _ = self.mock_execute.call_args_list[0]
expected = ("GRANT REPLICATION SLAVE ON *.* TO `testUSr`@`%` "
"IDENTIFIED BY 'somePwd';")
self.assertEqual(expected, args[0].text,
"Replication grant statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_port(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.get_port()
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@port")
self.assertEqual(expected, args[0],
"Port queries are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_binlog_position(self, *args):
result = {'File': 'mysql-bin.003', 'Position': '73'}
self.mock_execute.return_value.first = Mock(return_value=result)
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
found_result = self.mySqlApp.get_binlog_position()
self.assertEqual(result['File'], found_result['log_file'])
self.assertEqual(result['Position'], found_result['position'])
args, _ = self.mock_execute.call_args_list[0]
expected = ("SHOW MASTER STATUS")
self.assertEqual(expected, args[0],
"Master status queries are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_execute_on_client(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.execute_on_client('show tables')
args, _ = self.mock_execute.call_args_list[0]
expected = ("show tables")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_start_slave(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.start_slave()
args, _ = self.mock_execute.call_args_list[0]
expected = ("START SLAVE")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_stop_slave_with_failover(self, *args):
self.mock_execute.return_value.first = Mock(
return_value={'Master_User': 'root'})
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.stop_slave(True)
self.assertEqual('root', result['replication_user'])
expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL"]
self.assertEqual(len(expected), len(self.mock_execute.call_args_list))
for i in range(len(self.mock_execute.call_args_list)):
args, _ = self.mock_execute.call_args_list[i]
self.assertEqual(expected[i], args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(dbaas.MySqlApp, '_wait_for_slave_status')
def test_stop_slave_without_failover(self, *args):
self.mock_execute.return_value.first = Mock(
return_value={'Master_User': 'root'})
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.stop_slave(False)
self.assertEqual('root', result['replication_user'])
expected = ["SHOW SLAVE STATUS", "STOP SLAVE", "RESET SLAVE ALL",
"DROP USER root"]
self.assertEqual(len(expected), len(self.mock_execute.call_args_list))
for i in range(len(self.mock_execute.call_args_list)):
args, _ = self.mock_execute.call_args_list[i]
self.assertEqual(expected[i], args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_stop_master(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.stop_master()
args, _ = self.mock_execute.call_args_list[0]
expected = ("RESET MASTER")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__wait_for_slave_status(self, *args):
mock_client = Mock()
mock_client.execute = Mock()
result = ['Slave_running', 'on']
mock_client.execute.return_value.first = Mock(return_value=result)
self.mySqlApp._wait_for_slave_status('ON', mock_client, 5)
args, _ = mock_client.execute.call_args_list[0]
expected = ("SHOW GLOBAL STATUS like 'slave_running'")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(utils, 'poll_until', side_effect=PollTimeOut)
def test_fail__wait_for_slave_status(self, *args):
self.assertRaisesRegexp(RuntimeError,
"Replication is not on after 5 seconds.",
self.mySqlApp._wait_for_slave_status, 'ON',
Mock(), 5)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test__get_slave_status(self, *args):
self.mock_execute.return_value.first = Mock(return_value='some_thing')
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp._get_slave_status()
self.assertEqual('some_thing', result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SHOW SLAVE STATUS")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_latest_txn_id(self, *args):
self.mock_execute.return_value.first = Mock(
return_value=['some_thing'])
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.get_latest_txn_id()
self.assertEqual('some_thing', result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@global.gtid_executed")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_wait_for_txn(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.wait_for_txn('abcd')
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT WAIT_UNTIL_SQL_THREAD_AFTER_GTIDS('abcd')")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_get_txn_count(self, *args):
self.mock_execute.return_value.first = Mock(
return_value=['b1f3f33a-0789-ee1c-43f3-f8373e12f1ea:1'])
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
result = self.mySqlApp.get_txn_count()
self.assertEqual(1, result)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SELECT @@global.gtid_executed")
self.assertEqual(expected, args[0],
"Sql statements are not the same")
class MySqlAppInstallTest(MySqlAppTest):
def setUp(self):
super(MySqlAppInstallTest, self).setUp()
self.orig_create_engine = sqlalchemy.create_engine
self.orig_pkg_version = dbaas.packager.pkg_version
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
self.mock_client = Mock()
self.mock_execute = Mock()
self.mock_client.__enter__ = Mock()
self.mock_client.__exit__ = Mock()
self.mock_client.__enter__.return_value.execute = self.mock_execute
def tearDown(self):
super(MySqlAppInstallTest, self).tearDown()
sqlalchemy.create_engine = self.orig_create_engine
dbaas.packager.pkg_version = self.orig_pkg_version
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_install(self):
self.mySqlApp._install_mysql = Mock()
pkg.Package.pkg_is_installed = Mock(return_value=False)
utils.execute_with_timeout = Mock()
pkg.Package.pkg_install = Mock()
self.mySqlApp._clear_mysql_config = Mock()
self.mySqlApp._create_mysql_confd_dir = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.install_if_needed(["package"])
self.assertTrue(pkg.Package.pkg_install.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_secure(self):
dbaas.clear_expired_password = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mySqlApp._write_mycnf = Mock()
self.mysql_stops_successfully()
self.mysql_starts_successfully()
sqlalchemy.create_engine = Mock()
self.mySqlApp.secure('contents', None)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertTrue(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
@patch.object(utils, 'generate_random_password',
return_value='some_password')
def test_secure_root(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.secure_root()
update_root_password, _ = self.mock_execute.call_args_list[0]
update_expected = ("UPDATE mysql.user SET Password="
"PASSWORD('some_password') "
"WHERE User = 'root' AND Host = 'localhost';")
remove_root, _ = self.mock_execute.call_args_list[1]
remove_expected = ("DELETE FROM mysql.user WHERE "
"User = 'root' AND Host != 'localhost';")
self.assertEqual(update_expected, update_root_password[0].text,
"Update root password queries are not the same")
self.assertEqual(remove_expected, remove_root[0].text,
"Remove root queries are not the same")
@patch.object(operating_system, 'create_directory')
def test__create_mysql_confd_dir(self, mkdir_mock):
self.mySqlApp._create_mysql_confd_dir()
mkdir_mock.assert_called_once_with('/etc/mysql/conf.d', as_root=True)
@patch.object(operating_system, 'move')
def test__clear_mysql_config(self, mock_move):
self.mySqlApp._clear_mysql_config()
self.assertEqual(3, mock_move.call_count)
@patch.object(operating_system, 'move', side_effect=ProcessExecutionError)
def test_exception__clear_mysql_config(self, mock_move):
self.mySqlApp._clear_mysql_config()
# The call count must match the success case: each
# ProcessExecutionError is swallowed so the flow moves on to the
# next file move.
self.assertEqual(3, mock_move.call_count)
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_apply_overrides(self, *args):
overrides = {'sort_buffer_size': 1000000}
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.apply_overrides(overrides)
args, _ = self.mock_execute.call_args_list[0]
expected = ("SET GLOBAL sort_buffer_size=1000000")
self.assertEqual(expected, args[0].text,
"Set global statements are not the same")
@patch.object(dbaas, 'get_engine',
return_value=MagicMock(name='get_engine'))
def test_make_read_only(self, *args):
with patch.object(dbaas, 'LocalSqlClient',
return_value=self.mock_client):
self.mySqlApp.make_read_only('ON')
args, _ = self.mock_execute.call_args_list[0]
expected = ("set global read_only = ON")
self.assertEqual(expected, args[0].text,
"Set read_only statements are not the same")
def test_install_install_error(self):
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
pkg.Package.pkg_is_installed = Mock(return_value=False)
self.mySqlApp._clear_mysql_config = Mock()
self.mySqlApp._create_mysql_confd_dir = Mock()
pkg.Package.pkg_install = \
Mock(side_effect=pkg.PkgPackageStateError("Install error"))
self.assertRaises(pkg.PkgPackageStateError,
self.mySqlApp.install_if_needed, ["package"])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_secure_write_conf_error(self):
dbaas.clear_expired_password = Mock()
self.mySqlApp.start_mysql = Mock()
self.mySqlApp.stop_db = Mock()
self.mySqlApp._write_mycnf = Mock(
side_effect=IOError("Could not write file"))
self.mysql_stops_successfully()
self.mysql_starts_successfully()
sqlalchemy.create_engine = Mock()
self.assertRaises(IOError, self.mySqlApp.secure, "foo", None)
self.assertTrue(self.mySqlApp.stop_db.called)
self.assertTrue(self.mySqlApp._write_mycnf.called)
self.assertFalse(self.mySqlApp.start_mysql.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class TextClauseMatcher(object):
def __init__(self, text):
self.text = text
def __repr__(self):
return "TextClause(%s)" % self.text
def __eq__(self, arg):
print("Matching %s" % arg.text)
return self.text in arg.text
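# TextClauseMatcher implements __eq__ as a substring test against the
# .text attribute of a SQLAlchemy text clause, so it can stand in for
# the real clause in mock assertions, e.g.:
#
#     mock_conn.execute.assert_any_call(TextClauseMatcher('CREATE USER'))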
def mock_sql_connection():
utils.execute_with_timeout = MagicMock(return_value=['fake_password',
None])
mock_engine = MagicMock()
sqlalchemy.create_engine = MagicMock(return_value=mock_engine)
mock_conn = MagicMock()
dbaas.LocalSqlClient.__enter__ = MagicMock(return_value=mock_conn)
dbaas.LocalSqlClient.__exit__ = MagicMock(return_value=None)
return mock_conn
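# mock_sql_connection() patches the engine factory and the
# LocalSqlClient context manager in one step, so tests can assert on
# mock_conn.execute without a live database; a sketch of the intended use:
#
#     mock_conn = mock_sql_connection()
#     with patch.object(mock_conn, 'execute', return_value=None):
#         pass  # exercise code that opens a LocalSqlClient here
#     # then inspect mock_conn.execute.call_args / call_count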
class MySqlAppMockTest(testtools.TestCase):
def setUp(self):
super(MySqlAppMockTest, self).setUp()
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
def tearDown(self):
super(MySqlAppMockTest, self).tearDown()
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_secure_keep_root(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
utils.execute_with_timeout = MagicMock(return_value=None)
# skip writing the file for now
with patch.object(os.path, 'isfile', return_value=False):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
dbaas.clear_expired_password = MagicMock(return_value=None)
app = MySqlApp(mock_status)
app._write_mycnf = MagicMock(return_value=True)
app.start_mysql = MagicMock(return_value=None)
app.stop_db = MagicMock(return_value=None)
app.secure('foo', None)
self.assertTrue(mock_conn.execute.called)
def test_secure_with_mycnf_error(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_stop': 'service mysql stop'}):
utils.execute_with_timeout = MagicMock(return_value=None)
# skip writing the file for now
with patch.object(os.path, 'isfile', return_value=False):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
dbaas.clear_expired_password = MagicMock(return_value=None)
app = MySqlApp(mock_status)
dbaas.clear_expired_password = MagicMock(return_value=None)
self.assertRaises(TypeError, app.secure, None, None)
self.assertTrue(mock_conn.execute.called)
# At least called twice
self.assertTrue(mock_conn.execute.call_count >= 2)
(mock_status.wait_for_real_status_to_change_to.
assert_called_with(rd_instance.ServiceStatuses.SHUTDOWN,
app.state_change_wait_time, False))
class MySqlRootStatusTest(testtools.TestCase):
def setUp(self):
super(MySqlRootStatusTest, self).setUp()
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
def tearDown(self):
super(MySqlRootStatusTest, self).tearDown()
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
def test_root_is_enabled(self):
mock_conn = mock_sql_connection()
mock_rs = MagicMock()
mock_rs.rowcount = 1
with patch.object(mock_conn, 'execute', return_value=mock_rs):
self.assertThat(MySqlRootAccess().is_root_enabled(), Is(True))
def test_root_is_not_enabled(self):
mock_conn = mock_sql_connection()
mock_rs = MagicMock()
mock_rs.rowcount = 0
with patch.object(mock_conn, 'execute', return_value=mock_rs):
self.assertThat(MySqlRootAccess.is_root_enabled(), Equals(False))
def test_enable_root(self):
mock_conn = mock_sql_connection()
with patch.object(mock_conn, 'execute', return_value=None):
# invocation
user_ser = MySqlRootAccess.enable_root()
# verification
self.assertThat(user_ser, Not(Is(None)))
mock_conn.execute.assert_any_call(TextClauseMatcher('CREATE USER'),
user='root', host='%')
mock_conn.execute.assert_any_call(TextClauseMatcher(
'GRANT ALL PRIVILEGES ON *.*'))
mock_conn.execute.assert_any_call(TextClauseMatcher(
'UPDATE mysql.user'))
def test_enable_root_failed(self):
with patch.object(models.MySQLUser, '_is_valid_user_name',
return_value=False):
self.assertRaises(ValueError, MySqlAdmin().enable_root)
class MockStats:
f_blocks = 1024 ** 2
f_bsize = 4096
f_bfree = 512 * 1024
class InterrogatorTest(testtools.TestCase):
def tearDown(self):
super(InterrogatorTest, self).tearDown()
def test_to_gb(self):
result = to_gb(123456789)
self.assertEqual(0.11, result)
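# The expected value follows from to_gb() dividing by 1024 ** 3 and
# rounding to two decimals: 123456789 / 1073741824 is roughly 0.1150,
# which rounds to 0.11.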
def test_to_gb_zero(self):
result = to_gb(0)
self.assertEqual(0.0, result)
def test_get_filesystem_volume_stats(self):
with patch.object(os, 'statvfs', return_value=MockStats):
result = get_filesystem_volume_stats('/some/path/')
self.assertEqual(4096, result['block_size'])
self.assertEqual(1048576, result['total_blocks'])
self.assertEqual(524288, result['free_blocks'])
self.assertEqual(4.0, result['total'])
self.assertEqual(2147483648, result['free'])
self.assertEqual(2.0, result['used'])
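# Derivation of the figures from MockStats: total bytes are
# 1024 ** 2 blocks * 4096 B = 4294967296 (4.0 GB), free bytes are
# 512 * 1024 blocks * 4096 B = 2147483648 (2.0 GB), and used is the
# 2.0 GB difference.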
def test_get_filesystem_volume_stats_error(self):
with patch.object(os, 'statvfs', side_effect=OSError):
self.assertRaises(
RuntimeError,
get_filesystem_volume_stats, '/nonexistent/path')
class ServiceRegistryTest(testtools.TestCase):
def setUp(self):
super(ServiceRegistryTest, self).setUp()
def tearDown(self):
super(ServiceRegistryTest, self).tearDown()
def test_datastore_registry_with_extra_manager(self):
datastore_registry_ext_test = {
'test': 'trove.guestagent.datastore.test.manager.Manager',
}
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual(datastore_registry_ext_test.get('test', None),
test_dict.get('test'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.'
'couchbase.manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
def test_datastore_registry_with_existing_manager(self):
datastore_registry_ext_test = {
'mysql': 'trove.guestagent.datastore.mysql.'
'manager.Manager123',
}
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager123',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.couchbase.'
'manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
'manager.Manager',
test_dict.get('vertica'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
def test_datastore_registry_with_blank_dict(self):
datastore_registry_ext_test = dict()
dbaas_sr.get_custom_managers = Mock(
return_value=datastore_registry_ext_test)
test_dict = dbaas_sr.datastore_registry()
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('mysql'))
self.assertEqual('trove.guestagent.datastore.mysql.'
'manager.Manager',
test_dict.get('percona'))
self.assertEqual('trove.guestagent.datastore.experimental.redis.'
'manager.Manager',
test_dict.get('redis'))
self.assertEqual('trove.guestagent.datastore.experimental.cassandra.'
'manager.Manager',
test_dict.get('cassandra'))
self.assertEqual('trove.guestagent.datastore.experimental.couchbase.'
'manager.Manager',
test_dict.get('couchbase'))
self.assertEqual('trove.guestagent.datastore.experimental.mongodb.'
'manager.Manager',
test_dict.get('mongodb'))
self.assertEqual('trove.guestagent.datastore.experimental.couchdb.'
'manager.Manager',
test_dict.get('couchdb'))
self.assertEqual('trove.guestagent.datastore.experimental.vertica.'
'manager.Manager',
test_dict.get('vertica'))
self.assertEqual('trove.guestagent.datastore.experimental.db2.'
'manager.Manager',
test_dict.get('db2'))
class KeepAliveConnectionTest(testtools.TestCase):
class OperationalError(Exception):
def __init__(self, value):
self.args = [value]
def __str__(self):
return repr(self.args[0])  # __init__ stores the code in args, not .value
def setUp(self):
super(KeepAliveConnectionTest, self).setUp()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_LOG_err = dbaas.LOG
def tearDown(self):
super(KeepAliveConnectionTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.LOG = self.orig_LOG_err
def test_checkout_type_error(self):
dbapi_con = Mock()
dbapi_con.ping = Mock(side_effect=TypeError("Type Error"))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(TypeError, self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
def test_checkout_disconnection_error(self):
dbapi_con = Mock()
dbapi_con.OperationalError = self.OperationalError
dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(2013))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(sqlalchemy.exc.DisconnectionError,
self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
def test_checkout_operation_error(self):
dbapi_con = Mock()
dbapi_con.OperationalError = self.OperationalError
dbapi_con.ping = Mock(side_effect=dbapi_con.OperationalError(1234))
self.keepAliveConn = KeepAliveConnection()
self.assertRaises(self.OperationalError, self.keepAliveConn.checkout,
dbapi_con, Mock(), Mock())
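# Together these three cases pin down checkout(): a TypeError from
# ping() propagates unchanged, the MySQL "lost connection" code 2013 is
# translated into sqlalchemy.exc.DisconnectionError so the pool retires
# the connection, and any other OperationalError is re-raised as-is.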
class BaseDbStatusTest(testtools.TestCase):
def setUp(self):
super(BaseDbStatusTest, self).setUp()
util.init_db()
self.orig_dbaas_time_sleep = time.sleep
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(BaseDbStatusTest, self).tearDown()
time.sleep = self.orig_dbaas_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def test_begin_install(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.begin_install()
self.assertEqual(rd_instance.ServiceStatuses.BUILDING,
self.baseDbStatus.status)
def test_begin_restart(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.restart_mode = False
self.baseDbStatus.begin_restart()
self.assertTrue(self.baseDbStatus.restart_mode)
def test_end_install_or_restart(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.SHUTDOWN)
self.baseDbStatus.end_install_or_restart()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN,
self.baseDbStatus.status)
self.assertFalse(self.baseDbStatus.restart_mode)
def test_is_installed(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(self.baseDbStatus.is_installed)
def test_is_installed_none(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = None
self.assertTrue(self.baseDbStatus.is_installed)
def test_is_installed_building(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.BUILDING
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_installed_new(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.NEW
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_installed_failed(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.FAILED
self.assertFalse(self.baseDbStatus.is_installed)
def test_is_restarting(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.restart_mode = True
self.assertTrue(self.baseDbStatus._is_restarting)
def test_is_running(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertTrue(self.baseDbStatus.is_running)
def test_is_running_not(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus.status = rd_instance.ServiceStatuses.SHUTDOWN
self.assertFalse(self.baseDbStatus.is_running)
def test_wait_for_real_status_to_change_to(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.RUNNING)
time.sleep = Mock()
self.assertTrue(self.baseDbStatus.
wait_for_real_status_to_change_to
(rd_instance.ServiceStatuses.RUNNING, 10))
def test_wait_for_real_status_to_change_to_timeout(self):
self.baseDbStatus = BaseDbStatus()
self.baseDbStatus._get_actual_db_status = Mock(
return_value=rd_instance.ServiceStatuses.RUNNING)
time.sleep = Mock()
self.assertFalse(self.baseDbStatus.
wait_for_real_status_to_change_to
(rd_instance.ServiceStatuses.SHUTDOWN, 10))
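# --- Illustrative sketch (not part of the original module) ---
# The two tests above mock time.sleep and _get_actual_db_status; the
# polling they exercise is essentially a bounded retry loop like this
# (a sketch assuming one-second polling -- the real BaseDbStatus may use
# a different interval or sleep strategy):
def wait_for_real_status_to_change_to_sketch(status_obj, status, max_time):
    waited = 0
    while waited < max_time:
        if status_obj._get_actual_db_status() == status:
            return True
        time.sleep(1)  # mocked out in the tests above
        waited += 1
    return False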
class MySqlAppStatusTest(testtools.TestCase):
def setUp(self):
super(MySqlAppStatusTest, self).setUp()
util.init_db()
self.orig_utils_execute_with_timeout = dbaas.utils.execute_with_timeout
self.orig_load_mysqld_options = dbaas.load_mysqld_options
self.orig_dbaas_os_path_exists = dbaas.os.path.exists
self.orig_dbaas_time_sleep = time.sleep
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(MySqlAppStatusTest, self).tearDown()
dbaas.utils.execute_with_timeout = self.orig_utils_execute_with_timeout
dbaas.load_mysqld_options = self.orig_load_mysqld_options
dbaas.os.path.exists = self.orig_dbaas_os_path_exists
time.sleep = self.orig_dbaas_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def test_get_actual_db_status(self):
dbaas.utils.execute_with_timeout = Mock(return_value=(None, None))
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
@patch.object(utils, 'execute_with_timeout',
side_effect=ProcessExecutionError())
@patch.object(os.path, 'exists', return_value=True)
def test_get_actual_db_status_error_crashed(self, mock_exists,
mock_execute):
dbaas.load_mysqld_options = Mock(return_value={})
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)
def test_get_actual_db_status_error_shutdown(self):
mocked = Mock(side_effect=ProcessExecutionError())
dbaas.utils.execute_with_timeout = mocked
dbaas.load_mysqld_options = Mock(return_value={})
dbaas.os.path.exists = Mock(return_value=False)
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
def test_get_actual_db_status_error_blocked(self):
dbaas.utils.execute_with_timeout = MagicMock(
side_effect=[ProcessExecutionError(), ("some output", None)])
dbaas.load_mysqld_options = Mock()
dbaas.os.path.exists = Mock(return_value=True)
self.mySqlAppStatus = MySqlAppStatus()
status = self.mySqlAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.BLOCKED, status)
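# --- Illustrative sketch (not part of the original module) ---
# The four tests above pin down the decision table behind
# MySqlAppStatus._get_actual_db_status. A hedged reconstruction; the
# command strings and the pid-file default are assumptions, and the
# names mirror what the tests patch (utils, dbaas.load_mysqld_options,
# dbaas.os.path.exists):
def mysql_status_sketch():
    try:
        # mysqld answers a ping => the service is RUNNING
        utils.execute_with_timeout("/usr/bin/mysqladmin", "ping",
                                   run_as_root=True, root_helper="sudo")
        return rd_instance.ServiceStatuses.RUNNING
    except ProcessExecutionError:
        try:
            # a mysqld process exists but does not answer => BLOCKED
            out, _ = utils.execute_with_timeout("/bin/ps", "-C", "mysqld", "h")
            if out:
                return rd_instance.ServiceStatuses.BLOCKED
        except ProcessExecutionError:
            pass
        mysql_args = dbaas.load_mysqld_options()
        pid_file = mysql_args.get('pid-file', '/var/run/mysqld/mysqld.pid')
        if dbaas.os.path.exists(pid_file):
            # a pid file with no live, responsive process => CRASHED
            return rd_instance.ServiceStatuses.CRASHED
        return rd_instance.ServiceStatuses.SHUTDOWN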
class TestRedisApp(testtools.TestCase):
def setUp(self):
super(TestRedisApp, self).setUp()
self.FAKE_ID = 1000
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
with patch.multiple(RedisApp, _build_admin_client=DEFAULT,
_init_overrides_dir=DEFAULT):
self.app = RedisApp(state_change_wait_time=0)
self.orig_os_path_isfile = os.path.isfile
self.orig_utils_execute_with_timeout = utils.execute_with_timeout
utils.execute_with_timeout = Mock()
rservice.utils.execute_with_timeout = Mock()
def tearDown(self):
super(TestRedisApp, self).tearDown()
self.app = None
os.path.isfile = self.orig_os_path_isfile
utils.execute_with_timeout = self.orig_utils_execute_with_timeout
rservice.utils.execute_with_timeout = \
self.orig_utils_execute_with_timeout
def test_install_if_needed_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
with patch.object(RedisApp, '_install_redis', return_value=None):
self.app.install_if_needed('bar')
pkg.Package.pkg_is_installed.assert_any_call('bar')
self.assertEqual(0, RedisApp._install_redis.call_count)
def test_install_if_needed_not_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
with patch.object(RedisApp, '_install_redis', return_value=None):
self.app.install_if_needed('asdf')
pkg.Package.pkg_is_installed.assert_any_call('asdf')
RedisApp._install_redis.assert_any_call('asdf')
def test_install_redis(self):
with patch.object(utils, 'execute_with_timeout'):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
self.app._install_redis('redis')
pkg.Package.pkg_install.assert_any_call('redis', {}, 1200)
RedisApp.start_redis.assert_any_call()
self.assertTrue(utils.execute_with_timeout.called)
def test_enable_redis_on_boot_without_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_enable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._enable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_enable_redis_on_boot_with_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_enable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._enable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_disable_redis_on_boot_with_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_disable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._disable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_disable_redis_on_boot_without_upstart(self):
cmd = '123'
with patch.object(operating_system, 'service_discovery',
return_value={'cmd_disable': cmd}):
with patch.object(utils, 'execute_with_timeout',
return_value=None):
self.app._disable_redis_on_boot()
operating_system.service_discovery.assert_any_call(
RedisSystem.SERVICE_CANDIDATES)
utils.execute_with_timeout.assert_any_call(
cmd, shell=True)
def test_stop_db_without_fail(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.status = mock_status
RedisApp._disable_redis_on_boot = MagicMock(
return_value=None)
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.stop_db(do_not_start_on_reboot=True)
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
def test_stop_db_with_failure(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self.app.status = mock_status
RedisApp._disable_redis_on_boot = MagicMock(
return_value=None)
with patch.object(operating_system, 'stop_service') as stop_srv_mock:
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
self.app.stop_db(do_not_start_on_reboot=True)
stop_srv_mock.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
self.assertTrue(RedisApp._disable_redis_on_boot.called)
self.assertTrue(mock_status.end_install_or_restart.called)
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
def test_restart(self):
mock_status = MagicMock()
self.app.status = mock_status
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(RedisApp, 'stop_db', return_value=None):
with patch.object(RedisApp, 'start_redis', return_value=None):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
self.app.restart()
mock_status.begin_restart.assert_any_call()
RedisApp.stop_db.assert_any_call()
RedisApp.start_redis.assert_any_call()
mock_status.end_install_or_restart.assert_any_call()
def test_start_redis(self):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
self._assert_start_redis(mock_status)
@patch.object(utils, 'execute_with_timeout')
def test_start_redis_with_failure(self, exec_mock):
mock_status = MagicMock()
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=False)
mock_status.end_install_or_restart = MagicMock()
self._assert_start_redis(mock_status)
exec_mock.assert_called_once_with('pkill', '-9', 'redis-server',
run_as_root=True, root_helper='sudo')
mock_status.end_install_or_restart.assert_called_once_with()
@patch.multiple(operating_system, start_service=DEFAULT,
enable_service_on_boot=DEFAULT)
def _assert_start_redis(self, mock_status, start_service,
enable_service_on_boot):
self.app.status = mock_status
self.app.start_redis()
mock_status.wait_for_real_status_to_change_to.assert_called_once_with(
rd_instance.ServiceStatuses.RUNNING, ANY, False)
enable_service_on_boot.assert_called_once_with(
RedisSystem.SERVICE_CANDIDATES)
start_service.assert_called_once_with(RedisSystem.SERVICE_CANDIDATES)
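# --- Illustrative sketch (not part of the original module) ---
# The two start_redis tests above assert this control flow: enable the
# service on boot, start it, wait for RUNNING, and on timeout hard-kill
# any stray server and release the status guard. A hedged sketch of the
# method under test (the wait-time attribute name is an assumption):
def start_redis_sketch(app):
    operating_system.enable_service_on_boot(RedisSystem.SERVICE_CANDIDATES)
    operating_system.start_service(RedisSystem.SERVICE_CANDIDATES)
    if not app.status.wait_for_real_status_to_change_to(
            rd_instance.ServiceStatuses.RUNNING,
            app.state_change_wait_time, False):
        # the service never came up: kill leftovers, stop reporting
        utils.execute_with_timeout('pkill', '-9', 'redis-server',
                                   run_as_root=True, root_helper='sudo')
        app.status.end_install_or_restart()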
class CassandraDBAppTest(testtools.TestCase):
def setUp(self):
super(CassandraDBAppTest, self).setUp()
self.utils_execute_with_timeout = (
cass_service.utils.execute_with_timeout)
self.sleep = time.sleep
self.pkg_version = cass_service.packager.pkg_version
self.pkg = cass_service.packager
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.cassandra = cass_service.CassandraApp(self.appStatus)
self.orig_unlink = os.unlink
def tearDown(self):
super(CassandraDBAppTest, self).tearDown()
cass_service.utils.execute_with_timeout = (self.
utils_execute_with_timeout)
time.sleep = self.sleep
cass_service.packager.pkg_version = self.pkg_version
cass_service.packager = self.pkg
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.cassandra.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_with_db_update(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.cassandra.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_stop_db_error(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.cassandra.stop_db)
def test_restart(self):
self.cassandra.stop_db = Mock()
self.cassandra.start_db = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.restart()
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.cassandra.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra_runs_forever(self):
cass_service.utils.execute_with_timeout = Mock()
(self.cassandra.status.
wait_for_real_status_to_change_to) = Mock(return_value=False)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.cassandra.stop_db)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.SHUTDOWN.description}))
def test_start_db_with_db_update(self):
cass_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.RUNNING)
self.cassandra.start_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID,
{'service_status':
rd_instance.ServiceStatuses.RUNNING.description}))
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_cassandra_error(self):
self.cassandra._enable_db_on_boot = Mock()
self.cassandra.state_change_wait_time = 1
cass_service.utils.execute_with_timeout = Mock(
side_effect=ProcessExecutionError('Error'))
self.assertRaises(RuntimeError, self.cassandra.start_db)
def test_install(self):
self.cassandra._install_db = Mock()
self.pkg.pkg_is_installed = Mock(return_value=False)
self.cassandra.install_if_needed(['cassandra'])
self.assertTrue(self.cassandra._install_db.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_install_install_error(self):
self.cassandra.start_db = Mock()
self.cassandra.stop_db = Mock()
self.pkg.pkg_is_installed = Mock(return_value=False)
self.cassandra._install_db = Mock(
side_effect=pkg.PkgPackageStateError("Install error"))
self.assertRaises(pkg.PkgPackageStateError,
self.cassandra.install_if_needed,
['cassandra=1.2.10'])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_cassandra_error_in_write_config_verify_unlink(self):
# This test verifies not only that write_config properly invokes
# execute, but also that it attempts to unlink the temporary file
# as a result of the exception.
mock_unlink = Mock(return_value=0)
# We call tempfile.mkstemp() here and Mock() the mkstemp()
# parameter to write_config for testability.
(temp_handle, temp_config_name) = tempfile.mkstemp()
mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
configuration = 'this is my configuration'
with patch('trove.guestagent.common.operating_system.move',
side_effect=ProcessExecutionError('some exception')):
self.assertRaises(ProcessExecutionError,
self.cassandra.write_config,
config_contents=configuration,
execute_function=Mock(),
mkstemp_function=mock_mkstemp,
unlink_function=mock_unlink)
self.assertEqual(1, mock_unlink.call_count)
# really delete the temporary_config_file
os.unlink(temp_config_name)
@patch.multiple('trove.guestagent.common.operating_system',
chown=DEFAULT, chmod=DEFAULT, move=DEFAULT)
def test_cassandra_write_config(self, chown, chmod, move):
# ensure that write_config creates a temporary file, and then
# moves the file to the final place. Also validate the
# contents of the file written.
# We call tempfile.mkstemp() here and Mock() the mkstemp()
# parameter to write_config for testability.
(temp_handle, temp_config_name) = tempfile.mkstemp()
mock_mkstemp = MagicMock(return_value=(temp_handle, temp_config_name))
configuration = 'some arbitrary configuration text'
mock_execute = MagicMock(return_value=('', ''))
self.cassandra.write_config(configuration,
execute_function=mock_execute,
mkstemp_function=mock_mkstemp)
move.assert_called_with(temp_config_name, cass_system.CASSANDRA_CONF,
as_root=True)
chown.assert_called_with(cass_system.CASSANDRA_CONF,
"cassandra", "cassandra", recursive=False,
as_root=True)
chmod.assert_called_with(
cass_system.CASSANDRA_CONF, FileMode.ADD_READ_ALL, as_root=True)
self.assertEqual(1, mock_mkstemp.call_count)
with open(temp_config_name, 'r') as config_file:
configuration_data = config_file.read()
self.assertEqual(configuration, configuration_data)
# really delete the temporary_config_file
os.unlink(temp_config_name)
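# --- Illustrative sketch (not part of the original module) ---
# The two write_config tests above pin down a classic safe-write pattern:
# write the new contents to a mkstemp() file, move it into place as root,
# fix ownership and mode, and unlink the temporary file if anything goes
# wrong. A hedged sketch of that contract (the real method also accepts
# an execute_function hook, omitted here):
def write_config_sketch(config_contents,
                        mkstemp_function=tempfile.mkstemp,
                        unlink_function=os.unlink):
    temp_handle, temp_name = mkstemp_function()
    try:
        os.write(temp_handle, config_contents)
        os.close(temp_handle)
        operating_system.move(temp_name, cass_system.CASSANDRA_CONF,
                              as_root=True)
        operating_system.chown(cass_system.CASSANDRA_CONF,
                               'cassandra', 'cassandra',
                               recursive=False, as_root=True)
        operating_system.chmod(cass_system.CASSANDRA_CONF,
                               FileMode.ADD_READ_ALL, as_root=True)
    except Exception:
        # leave no half-written config behind
        unlink_function(temp_name)
        raise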
class CouchbaseAppTest(testtools.TestCase):
def fake_couchbase_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(CouchbaseAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
couchservice.utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
time.sleep = Mock()
self.orig_service_discovery = operating_system.service_discovery
self.orig_get_ip = netutils.get_my_ipv4
operating_system.service_discovery = (
self.fake_couchbase_service_discovery)
netutils.get_my_ipv4 = Mock()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.couchbaseApp = couchservice.CouchbaseApp(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(CouchbaseAppTest, self).tearDown()
couchservice.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
netutils.get_my_ipv4 = self.orig_get_ip
operating_system.service_discovery = self.orig_service_discovery
time.sleep = self.orig_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.couchbaseApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_error(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.couchbaseApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp.stop_db = Mock()
self.couchbaseApp.start_db = Mock()
self.couchbaseApp.restart()
self.assertTrue(self.couchbaseApp.stop_db.called)
self.assertTrue(self.couchbaseApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchservice.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchbaseApp._enable_db_on_boot = Mock()
self.couchbaseApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_error(self):
mocked = Mock(side_effect=ProcessExecutionError('Error'))
couchservice.utils.execute_with_timeout = mocked
self.couchbaseApp._enable_db_on_boot = Mock()
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
def test_start_db_runs_forever(self):
couchservice.utils.execute_with_timeout = Mock()
self.couchbaseApp._enable_db_on_boot = Mock()
self.couchbaseApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.couchbaseApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_install_when_couchbase_installed(self):
couchservice.packager.pkg_is_installed = Mock(return_value=True)
couchservice.utils.execute_with_timeout = Mock()
self.couchbaseApp.install_if_needed(["package"])
self.assertTrue(couchservice.packager.pkg_is_installed.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class CouchDBAppTest(testtools.TestCase):
def fake_couchdb_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(CouchDBAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
couchdb_service.utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
time.sleep = Mock()
self.orig_service_discovery = operating_system.service_discovery
self.orig_get_ip = netutils.get_my_ipv4
operating_system.service_discovery = (
self.fake_couchdb_service_discovery)
netutils.get_my_ipv4 = Mock()
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.couchdbApp = couchdb_service.CouchDBApp(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(CouchDBAppTest, self).tearDown()
couchdb_service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
netutils.get_my_ipv4 = self.orig_get_ip
operating_system.service_discovery = self.orig_service_discovery
time.sleep = self.orig_time_sleep
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.couchdbApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_error(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.couchdbApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp.stop_db = Mock()
self.couchdbApp.start_db = Mock()
self.couchdbApp.restart()
self.assertTrue(self.couchdbApp.stop_db.called)
self.assertTrue(self.couchdbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called)
def test_start_db(self):
couchdb_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.couchdbApp._enable_db_on_boot = Mock()
self.couchdbApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_error(self):
couchdb_service.utils.execute_with_timeout = Mock(
side_effect=ProcessExecutionError('Error'))
self.couchdbApp._enable_db_on_boot = Mock()
self.assertRaises(RuntimeError, self.couchdbApp.start_db)
def test_install_when_couchdb_installed(self):
couchdb_service.packager.pkg_is_installed = Mock(return_value=True)
couchdb_service.utils.execute_with_timeout = Mock()
self.couchdbApp.install_if_needed(["package"])
self.assertTrue(couchdb_service.packager.pkg_is_installed.called)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class MongoDBAppTest(testtools.TestCase):
def fake_mongodb_service_discovery(self, candidates):
return {
'cmd_start': 'start',
'cmd_stop': 'stop',
'cmd_enable': 'enable',
'cmd_disable': 'disable'
}
def setUp(self):
super(MongoDBAppTest, self).setUp()
self.orig_utils_execute_with_timeout = (mongo_service.
utils.execute_with_timeout)
self.orig_time_sleep = time.sleep
self.orig_packager = mongo_system.PACKAGER
self.orig_service_discovery = operating_system.service_discovery
self.orig_os_unlink = os.unlink
operating_system.service_discovery = (
self.fake_mongodb_service_discovery)
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.mongoDbApp = mongo_service.MongoDBApp(self.appStatus)
time.sleep = Mock()
os.unlink = Mock()
def tearDown(self):
super(MongoDBAppTest, self).tearDown()
mongo_service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
time.sleep = self.orig_time_sleep
mongo_system.PACKAGER = self.orig_packager
operating_system.service_discovery = self.orig_service_discovery
os.unlink = self.orig_os_unlink
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stopdb(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mongoDbApp.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_stop_db_with_db_update(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(
rd_instance.ServiceStatuses.SHUTDOWN)
self.mongoDbApp.stop_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
def test_stop_db_error(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.state_change_wait_time = 1
self.assertRaises(RuntimeError, self.mongoDbApp.stop_db)
def test_restart(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.stop_db = Mock()
self.mongoDbApp.start_db = Mock()
self.mongoDbApp.restart()
self.assertTrue(self.mongoDbApp.stop_db.called)
self.assertTrue(self.mongoDbApp.start_db.called)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'running'}))
def test_start_db(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_start_db_with_update(self):
mongo_service.utils.execute_with_timeout = Mock()
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
self.mongoDbApp.start_db(True)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'running'}))
def test_start_db_runs_forever(self):
mongo_service.utils.execute_with_timeout = Mock(
return_value=["ubuntu 17036 0.0 0.1 618960 "
"29232 pts/8 Sl+ Jan29 0:07 mongod", ""])
self.mongoDbApp.state_change_wait_time = 1
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
self.assertTrue(conductor_api.API.heartbeat.called_once_with(
self.FAKE_ID, {'service_status': 'shutdown'}))
def test_start_db_error(self):
self.mongoDbApp._enable_db_on_boot = Mock()
mocked = Mock(side_effect=ProcessExecutionError('Error'))
mongo_service.utils.execute_with_timeout = mocked
self.assertRaises(RuntimeError, self.mongoDbApp.start_db)
def test_mongodb_error_in_write_config_verify_unlink(self):
configuration = {'config_contents': 'some junk'}
with patch.object(os.path, 'isfile', return_value=True):
with patch.object(operating_system, 'move',
side_effect=ProcessExecutionError):
self.assertRaises(ProcessExecutionError,
self.mongoDbApp.reset_configuration,
configuration=configuration)
self.assertEqual(1, operating_system.move.call_count)
self.assertEqual(1, os.unlink.call_count)
def test_start_db_with_conf_changes_db_is_running(self):
self.mongoDbApp.start_db = Mock()
self.appStatus.status = rd_instance.ServiceStatuses.RUNNING
self.assertRaises(RuntimeError,
self.mongoDbApp.start_db_with_conf_changes,
Mock())
def test_install_when_db_installed(self):
packager_mock = MagicMock()
packager_mock.pkg_is_installed = MagicMock(return_value=True)
mongo_system.PACKAGER = packager_mock
self.mongoDbApp.install_if_needed(['package'])
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_install_when_db_not_installed(self):
packager_mock = MagicMock()
packager_mock.pkg_is_installed = MagicMock(return_value=False)
mongo_system.PACKAGER = packager_mock
self.mongoDbApp.install_if_needed(['package'])
packager_mock.pkg_install.assert_any_call(ANY, {}, ANY)
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class VerticaAppStatusTest(testtools.TestCase):
def setUp(self):
super(VerticaAppStatusTest, self).setUp()
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
def tearDown(self):
super(VerticaAppStatusTest, self).tearDown()
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
def test_get_actual_db_status(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(return_value=['db_srvr', None])):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.RUNNING, status)
def test_get_actual_db_status_shutdown(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', None],
['db_srvr', None]])):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.SHUTDOWN, status)
def test_get_actual_db_status_error_crashed(self):
self.verticaAppStatus = VerticaAppStatus()
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=ProcessExecutionError('problem'
))):
status = self.verticaAppStatus._get_actual_db_status()
self.assertEqual(rd_instance.ServiceStatuses.CRASHED, status)
class VerticaAppTest(testtools.TestCase):
def setUp(self):
super(VerticaAppTest, self).setUp()
self.FAKE_ID = 1000
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.app = VerticaApp(self.appStatus)
self.setread = VolumeDevice.set_readahead_size
self.Popen = subprocess.Popen
vertica_system.shell_execute = MagicMock(return_value=('', ''))
VolumeDevice.set_readahead_size = Mock()
subprocess.Popen = Mock()
self.test_config = ConfigParser.ConfigParser()
self.test_config.add_section('credentials')
self.test_config.set('credentials',
'dbadmin_password', 'some_password')
def tearDown(self):
super(VerticaAppTest, self).tearDown()
self.app = None
VolumeDevice.set_readahead_size = self.setread
subprocess.Popen = self.Popen
def test_install_if_needed_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=True):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
self.app.install_if_needed('vertica')
pkg.Package.pkg_is_installed.assert_any_call('vertica')
self.assertEqual(0, pkg.Package.pkg_install.call_count)
def test_install_if_needed_not_installed(self):
with patch.object(pkg.Package, 'pkg_is_installed', return_value=False):
with patch.object(pkg.Package, 'pkg_install', return_value=None):
self.app.install_if_needed('vertica')
pkg.Package.pkg_is_installed.assert_any_call('vertica')
self.assertEqual(1, pkg.Package.pkg_install.call_count)
def test_prepare_for_install_vertica(self):
self.app.prepare_for_install_vertica()
arguments = vertica_system.shell_execute.call_args_list[0]
self.assertEqual(1, VolumeDevice.set_readahead_size.call_count)
expected_command = (
"VERT_DBA_USR=dbadmin VERT_DBA_HOME=/home/dbadmin "
"VERT_DBA_GRP=verticadba /opt/vertica/oss/python/bin/python"
" -m vertica.local_coerce")
# ``arguments`` is a mock.call object; calling assert_called_with on it
# is a silent no-op, so compare the recorded positional args instead.
self.assertEqual((expected_command,), arguments[0])
def test_failure_prepare_for_install_vertica(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaises(ProcessExecutionError,
self.app.prepare_for_install_vertica)
def test_install_vertica(self):
with patch.object(self.app, 'write_config',
return_value=None):
self.app.install_vertica(members='10.0.0.2')
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (
vertica_system.INSTALL_VERTICA % ('10.0.0.2', '/var/lib/vertica'))
self.assertEqual((expected_command,), arguments[0])
def test_failure_install_vertica(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('some exception')):
self.assertRaisesRegexp(RuntimeError, 'install_vertica failed.',
self.app.install_vertica,
members='10.0.0.2')
def test_create_db(self):
with patch.object(self.app, 'read_config',
return_value=self.test_config):
self.app.create_db(members='10.0.0.2')
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (vertica_system.CREATE_DB % ('10.0.0.2', 'db_srvr',
'/var/lib/vertica',
'/var/lib/vertica',
'some_password'))
self.assertEqual((expected_command, 'dbadmin'), arguments[0])
def test_failure_create_db(self):
with patch.object(self.app, 'read_config',
side_effect=RuntimeError('Error')):
self.assertRaisesRegexp(RuntimeError,
'Vertica database create failed.',
self.app.create_db)
# Because of an exception in read_config there was no shell execution.
self.assertEqual(0, vertica_system.shell_execute.call_count)
def test_vertica_write_config(self):
temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
mock_mkstemp = MagicMock(return_value=(temp_file_handle))
mock_unlink = Mock(return_value=0)
self.app.write_config(config=self.test_config,
temp_function=mock_mkstemp,
unlink_function=mock_unlink)
arguments = vertica_system.shell_execute.call_args_list[0]
expected_command = (
("install -o root -g root -m 644 %(source)s %(target)s"
) % {'source': temp_file_handle.name,
'target': vertica_system.VERTICA_CONF})
self.assertEqual((expected_command,), arguments[0])
self.assertEqual(1, mock_mkstemp.call_count)
configuration_data = ConfigParser.ConfigParser()
configuration_data.read(temp_file_handle.name)
self.assertEqual(
self.test_config.get('credentials', 'dbadmin_password'),
configuration_data.get('credentials', 'dbadmin_password'))
self.assertEqual(1, mock_unlink.call_count)
# delete the temporary_config_file
os.unlink(temp_file_handle.name)
def test_vertica_error_in_write_config_verify_unlink(self):
mock_unlink = Mock(return_value=0)
temp_file_handle = tempfile.NamedTemporaryFile(delete=False)
mock_mkstemp = MagicMock(return_value=temp_file_handle)
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('some exception')):
self.assertRaises(ProcessExecutionError,
self.app.write_config,
config=self.test_config,
temp_function=mock_mkstemp,
unlink_function=mock_unlink)
self.assertEqual(1, mock_unlink.call_count)
# delete the temporary_config_file
os.unlink(temp_file_handle.name)
def test_restart(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
mock_status.begin_restart = MagicMock(return_value=None)
with patch.object(VerticaApp, 'stop_db', return_value=None):
with patch.object(VerticaApp, 'start_db', return_value=None):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.restart()
mock_status.begin_restart.assert_any_call()
VerticaApp.stop_db.assert_any_call()
VerticaApp.start_db.assert_any_call()
def test_start_db(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_enable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.start_db()
agent_start, db_start = subprocess.Popen.call_args_list
agent_expected_command = [
'sudo', 'su', '-', 'root', '-c',
(vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'start')]
db_expected_cmd = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.START_DB % ('db_srvr', 'some_password'))]
self.assertTrue(mock_status.end_install_or_restart.called)
# compare recorded call args directly (see note in
# test_prepare_for_install_vertica about call objects)
self.assertEqual((agent_expected_command,), agent_start[0])
self.assertEqual((db_expected_cmd,), db_start[0])
def test_start_db_failure(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
with patch.object(app, '_enable_db_on_boot',
side_effect=RuntimeError()):
with patch.object(app, 'read_config',
return_value=self.test_config):
self.assertRaises(RuntimeError, app.start_db)
def test_stop_db(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=True)
mock_status.end_install_or_restart = MagicMock(
return_value=None)
app.stop_db()
self.assertEqual(
3, vertica_system.shell_execute.call_count)
# There are 3 shell executions:
# a) stop the vertica-agent service
# b) check database status
# c) stop_db
# We check that the 3rd command executed was stop_db.
arguments = vertica_system.shell_execute.call_args_list[2]
expected_cmd = (vertica_system.STOP_DB % ('db_srvr',
'some_password'))
self.assertTrue(
mock_status.wait_for_real_status_to_change_to.called)
self.assertEqual((expected_cmd, 'dbadmin'), arguments[0])
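# --- Illustrative sketch (not part of the original module) ---
# test_stop_db through test_stop_db_failure above fix the shape of
# VerticaApp.stop_db: three shell executions (stop the agent service,
# query database status, then STOP_DB as dbadmin) followed by a wait for
# SHUTDOWN that raises on timeout. A hedged sketch; the status-query
# constant name and error message are assumptions:
def stop_db_sketch(app, update_db=False, do_not_start_on_reboot=False):
    if do_not_start_on_reboot:
        app._disable_db_on_boot()
    vertica_system.shell_execute(
        vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'stop', 'root')
    password = app.read_config().get('credentials', 'dbadmin_password')
    out, _ = vertica_system.shell_execute(
        'db_srvr_status_command', 'dbadmin')  # assumed status query
    if out.strip():
        vertica_system.shell_execute(
            vertica_system.STOP_DB % ('db_srvr', password), 'dbadmin')
        if not app.status.wait_for_real_status_to_change_to(
                rd_instance.ServiceStatuses.SHUTDOWN,
                app.state_change_wait_time, update_db):
            raise RuntimeError("Could not stop database.")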
def test_stop_db_do_not_start_on_reboot(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=True)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
app.stop_db(do_not_start_on_reboot=True)
self.assertEqual(
3, vertica_system.shell_execute.call_count)
app._disable_db_on_boot.assert_any_call()
def test_stop_db_database_not_running(self):
mock_status = MagicMock()
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
app.stop_db()
# Since the database stop command does not get executed,
# only 2 shell calls are made.
self.assertEqual(
2, vertica_system.shell_execute.call_count)
def test_stop_db_failure(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, '_disable_db_on_boot', return_value=None):
with patch.object(app, 'read_config',
return_value=self.test_config):
with patch.object(vertica_system, 'shell_execute',
MagicMock(side_effect=[['', ''],
['db_srvr', None],
['', '']])):
mock_status.wait_for_real_status_to_change_to = MagicMock(
return_value=None)
mock_status.end_install_or_restart = MagicMock(
return_value=None)
self.assertRaises(RuntimeError, app.stop_db)
def test_export_conf_to_members(self):
self.app._export_conf_to_members(members=['member1', 'member2'])
self.assertEqual(2, vertica_system.shell_execute.call_count)
def test_fail__export_conf_to_members(self):
app = VerticaApp(MagicMock())
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaises(ProcessExecutionError,
app._export_conf_to_members,
['member1', 'member2'])
def test_authorize_public_keys(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
self.app.authorize_public_keys(user=user, public_keys=keys)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
def test_authorize_public_keys_authorized_file_not_exists(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
['', '']])):
self.app.authorize_public_keys(user=user, public_keys=keys)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/authorized_keys')
def test_fail_authorize_public_keys(self):
user = 'test_user'
keys = ['test_key@machine1', 'test_key@machine2']
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
ProcessExecutionError('Some Error')
])):
self.assertRaises(ProcessExecutionError,
self.app.authorize_public_keys, user, keys)
def test_get_public_keys(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
self.app.get_public_keys(user=user)
self.assertEqual(2, vertica_system.shell_execute.call_count)
vertica_system.shell_execute.assert_any_call(
(vertica_system.SSH_KEY_GEN % ('/home/' + user)), user)
vertica_system.shell_execute.assert_any_call(
'cat ' + '/home/' + user + '/.ssh/id_rsa.pub')
def test_get_public_keys_if_key_exists(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
['some_key', None]])):
key = self.app.get_public_keys(user=user)
self.assertEqual(2, vertica_system.shell_execute.call_count)
self.assertEqual('some_key', key)
def test_fail_get_public_keys(self):
user = 'test_user'
with patch.object(os.path, 'expanduser',
return_value=('/home/' + user)):
with patch.object(
vertica_system, 'shell_execute',
MagicMock(side_effect=[ProcessExecutionError('Some Error'),
ProcessExecutionError('Some Error')
])):
self.assertRaises(ProcessExecutionError,
self.app.get_public_keys, user)
def test_install_cluster(self):
with patch.object(self.app, 'read_config',
return_value=self.test_config):
self.app.install_cluster(members=['member1', 'member2'])
# Verify the number of shell calls;
# the commands themselves are covered by the preceding tests.
self.assertEqual(5, vertica_system.shell_execute.call_count)
def test__enable_db_on_boot(self):
app = VerticaApp(MagicMock())
app._enable_db_on_boot()
restart_policy, agent_enable = subprocess.Popen.call_args_list
expected_restart_policy = [
'sudo', 'su', '-', 'dbadmin', '-c',
(vertica_system.SET_RESTART_POLICY % ('db_srvr', 'always'))]
expected_agent_enable = [
'sudo', 'su', '-', 'root', '-c',
(vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'enable')]
self.assertEqual(2, subprocess.Popen.call_count)
self.assertEqual((expected_restart_policy,), restart_policy[0])
self.assertEqual((expected_agent_enable,), agent_enable[0])
def test_failure__enable_db_on_boot(self):
with patch.object(subprocess, 'Popen', side_effect=OSError):
self.assertRaisesRegexp(RuntimeError,
'Could not enable db on boot.',
self.app._enable_db_on_boot)
def test__disable_db_on_boot(self):
app = VerticaApp(MagicMock())
app._disable_db_on_boot()
restart_policy, agent_disable = (
vertica_system.shell_execute.call_args_list)
expected_restart_policy = (
vertica_system.SET_RESTART_POLICY % ('db_srvr', 'never'))
expected_agent_disable = (
vertica_system.VERTICA_AGENT_SERVICE_COMMAND % 'disable')
self.assertEqual(2, vertica_system.shell_execute.call_count)
self.assertEqual((expected_restart_policy, 'dbadmin'), restart_policy[0])
self.assertEqual((expected_agent_disable, 'root'), agent_disable[0])
def test_failure__disable_db_on_boot(self):
with patch.object(vertica_system, 'shell_execute',
side_effect=ProcessExecutionError('Error')):
self.assertRaisesRegexp(RuntimeError,
'Could not disable db on boot.',
self.app._disable_db_on_boot)
def test_read_config(self):
app = VerticaApp(MagicMock())
with patch.object(ConfigParser, 'ConfigParser',
return_value=self.test_config):
test_config = app.read_config()
self.assertEqual('some_password',
test_config.get('credentials', 'dbadmin_password')
)
def test_fail_read_config(self):
with patch.object(ConfigParser.ConfigParser, 'read',
side_effect=ConfigParser.Error()):
self.assertRaises(RuntimeError, self.app.read_config)
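# --- Illustrative sketch (not part of the original module) ---
# The two read_config tests above describe the contract: parse the
# Vertica config with ConfigParser and wrap any parse failure in a
# RuntimeError. A hedged sketch (the error message is an assumption):
def read_config_sketch():
    config = ConfigParser.ConfigParser()
    try:
        config.read(vertica_system.VERTICA_CONF)
        return config
    except ConfigParser.Error:
        raise RuntimeError("Failed to read config file.")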
def test_complete_install_or_restart(self):
app = VerticaApp(MagicMock())
app.complete_install_or_restart()
app.status.end_install_or_restart.assert_any_call()
def test_start_db_with_conf_changes(self):
mock_status = MagicMock()
type(mock_status)._is_restarting = PropertyMock(return_value=False)
app = VerticaApp(mock_status)
with patch.object(app, 'read_config',
return_value=self.test_config):
app.start_db_with_conf_changes('test_config_contents')
app.status.end_install_or_restart.assert_any_call()
class DB2AppTest(testtools.TestCase):
def setUp(self):
super(DB2AppTest, self).setUp()
self.orig_utils_execute_with_timeout = (
db2service.utils.execute_with_timeout)
util.init_db()
self.FAKE_ID = str(uuid4())
InstanceServiceStatus.create(instance_id=self.FAKE_ID,
status=rd_instance.ServiceStatuses.NEW)
self.appStatus = FakeAppStatus(self.FAKE_ID,
rd_instance.ServiceStatuses.NEW)
self.db2App = db2service.DB2App(self.appStatus)
dbaas.CONF.guest_id = self.FAKE_ID
def tearDown(self):
super(DB2AppTest, self).tearDown()
db2service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
InstanceServiceStatus.find_by(instance_id=self.FAKE_ID).delete()
dbaas.CONF.guest_id = None
self.db2App = None
def assert_reported_status(self, expected_status):
service_status = InstanceServiceStatus.find_by(
instance_id=self.FAKE_ID)
self.assertEqual(expected_status, service_status.status)
def test_stop_db(self):
db2service.utils.execute_with_timeout = MagicMock(return_value=None)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.SHUTDOWN)
self.db2App.stop_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
def test_restart_server(self):
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
mock_status = MagicMock(return_value=None)
app = db2service.DB2App(mock_status)
mock_status.begin_restart = MagicMock(return_value=None)
app.stop_db = MagicMock(return_value=None)
app.start_db = MagicMock(return_value=None)
app.restart()
self.assertTrue(mock_status.begin_restart.called)
self.assertTrue(app.stop_db.called)
self.assertTrue(app.start_db.called)
def test_start_db(self):
db2service.utils.execute_with_timeout = MagicMock(return_value=None)
self.appStatus.set_next_status(rd_instance.ServiceStatuses.RUNNING)
with patch.object(self.db2App, '_enable_db_on_boot',
return_value=None):
self.db2App.start_db()
self.assert_reported_status(rd_instance.ServiceStatuses.NEW)
class DB2AdminTest(testtools.TestCase):
def setUp(self):
super(DB2AdminTest, self).setUp()
self.db2Admin = db2service.DB2Admin()
self.orig_utils_execute_with_timeout = (
db2service.utils.execute_with_timeout)
def tearDown(self):
super(DB2AdminTest, self).tearDown()
db2service.utils.execute_with_timeout = (
self.orig_utils_execute_with_timeout)
def test_delete_database(self):
with patch.object(
db2service, 'run_command',
MagicMock(
return_value=None,
side_effect=ProcessExecutionError('Error'))):
self.assertRaises(GuestError,
self.db2Admin.delete_database,
FAKE_DB)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 drop database testDB"
self.assertEqual(expected, args[0],
"Delete database queries are not the same")
def test_list_databases(self):
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
self.db2Admin.list_databases()
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 list database directory " \
"| grep -B6 -i indirect | grep 'Database name' | " \
"sed 's/.*= //'"
self.assertEqual(expected, args[0],
"List database queries are not the same")
def test_create_users(self):
with patch.object(db2service, 'run_command', MagicMock(
return_value=None)):
db2service.utils.execute_with_timeout = MagicMock(
return_value=None)
self.db2Admin.create_user(FAKE_USER)
self.assertTrue(db2service.utils.execute_with_timeout.called)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 GRANT DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
"ON DATABASE TO USER random; db2 connect reset"
self.assertEqual(
expected, args[0],
"Granting database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_delete_users_with_db(self):
with patch.object(db2service, 'run_command',
MagicMock(return_value=None)):
with patch.object(db2service.DB2Admin, 'list_access',
MagicMock(return_value=None)):
utils.execute_with_timeout = MagicMock(return_value=None)
self.db2Admin.delete_user(FAKE_USER[0])
self.assertTrue(db2service.run_command.called)
self.assertTrue(db2service.utils.execute_with_timeout.called)
self.assertFalse(db2service.DB2Admin.list_access.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT,DATAACCESS " \
"ON DATABASE FROM USER random; db2 connect reset"
self.assertEqual(
expected, args[0],
"Revoke database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_delete_users_without_db(self):
FAKE_USER.append(
{"_name": "random2", "_password": "guesswhat", "_databases": []})
with patch.object(db2service, 'run_command',
MagicMock(return_value=None)):
with patch.object(db2service.DB2Admin, 'list_access',
MagicMock(return_value=[FAKE_DB])):
utils.execute_with_timeout = MagicMock(return_value=None)
self.db2Admin.delete_user(FAKE_USER[1])
self.assertTrue(db2service.run_command.called)
self.assertTrue(db2service.DB2Admin.list_access.called)
self.assertTrue(
db2service.utils.execute_with_timeout.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 connect to testDB; " \
"db2 REVOKE DBADM,CREATETAB,BINDADD,CONNECT," \
"DATAACCESS ON DATABASE FROM USER random2; " \
"db2 connect reset"
self.assertEqual(
expected, args[0],
"Revoke database access queries are not the same")
self.assertEqual(1, db2service.run_command.call_count)
def test_list_users(self):
databases = []
databases.append(FAKE_DB)
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
with patch.object(self.db2Admin, "list_databases",
MagicMock(return_value=(databases, None))):
self.db2Admin.list_users()
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 +o connect to testDB; " \
"db2 -x select grantee, dataaccessauth " \
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(expected, args[0],
"List user queries are not the same")
def test_get_user(self):
databases = []
databases.append(FAKE_DB)
with patch.object(db2service, 'run_command', MagicMock(
side_effect=ProcessExecutionError('Error'))):
with patch.object(self.db2Admin, "list_databases",
MagicMock(return_value=(databases, None))):
self.db2Admin._get_user('random', None)
self.assertTrue(db2service.run_command.called)
args, _ = db2service.run_command.call_args_list[0]
expected = "db2 +o connect to testDB; " \
"db2 -x select grantee, dataaccessauth " \
"from sysibm.sysdbauth; db2 connect reset"
self.assertEqual(expected, args[0],
"Get user queries are not the same")
| apache-2.0 |
dbentley/pants | tests/python/pants_test/engine/test_round_engine.py | 5 | 10965 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import itertools
from pants.engine.round_engine import RoundEngine
from pants.task.task import Task
from pants_test.base_test import BaseTest
from pants_test.engine.base_engine_test import EngineTestBase
class RoundEngineTest(EngineTestBase, BaseTest):
def setUp(self):
super(RoundEngineTest, self).setUp()
self.set_options_for_scope('', explain=False)
for outer in ['goal1', 'goal2', 'goal3', 'goal4', 'goal5']:
for inner in ['task1', 'task2', 'task3', 'task4', 'task5']:
self.set_options_for_scope('{}.{}'.format(outer, inner),
level='info', colors=False)
self.engine = RoundEngine()
self.actions = []
def tearDown(self):
self.assertTrue(not self._context or self._context.is_unlocked())
super(RoundEngineTest, self).tearDown()
def alternate_target_roots_action(self, tag):
return 'alternate_target_roots', tag, self._context
def prepare_action(self, tag):
return 'prepare', tag, self._context
def execute_action(self, tag):
return 'execute', tag, self._context
def construct_action(self, tag):
return 'construct', tag, self._context
def record(self, tag, product_types=None, required_data=None, alternate_target_roots=None):
class RecordingTask(Task):
options_scope = tag
@classmethod
def product_types(cls):
return product_types or []
@classmethod
def alternate_target_roots(cls, options, address_mapper, build_graph):
self.actions.append(self.alternate_target_roots_action(tag))
return alternate_target_roots
@classmethod
def prepare(cls, options, round_manager):
for requirement in (required_data or ()):
round_manager.require_data(requirement)
self.actions.append(self.prepare_action(tag))
def __init__(me, *args, **kwargs):
super(RecordingTask, me).__init__(*args, **kwargs)
self.actions.append(self.construct_action(tag))
def execute(me):
self.actions.append(self.execute_action(tag))
return RecordingTask
def install_task(self, name, product_types=None, goal=None, required_data=None,
alternate_target_roots=None):
"""Install a task to goal and return all installed tasks of the goal.
This is needed to initialize tasks' context.
"""
task_type = self.record(name, product_types, required_data, alternate_target_roots)
return super(RoundEngineTest,
self).install_task(name=name, action=task_type, goal=goal).task_types()
def create_context(self, for_task_types=None, target_roots=None):
self._context = self.context(for_task_types=for_task_types, target_roots=target_roots)
self.assertTrue(self._context.is_unlocked())
def assert_actions(self, *expected_execute_ordering):
expected_pre_execute_actions = set()
expected_execute_actions = []
for action in expected_execute_ordering:
expected_pre_execute_actions.add(self.alternate_target_roots_action(action))
expected_pre_execute_actions.add(self.prepare_action(action))
expected_execute_actions.append(self.construct_action(action))
expected_execute_actions.append(self.execute_action(action))
expected_execute_actions_length = len(expected_execute_ordering) * 2
self.assertEqual(expected_pre_execute_actions,
set(self.actions[:-expected_execute_actions_length]))
self.assertEqual(expected_execute_actions, self.actions[-expected_execute_actions_length:])
def test_lifecycle_ordering(self):
task1 = self.install_task('task1', goal='goal1', product_types=['1'])
task2 = self.install_task('task2', goal='goal1', product_types=['2'], required_data=['1'])
task3 = self.install_task('task3', goal='goal3', product_types=['3'], required_data=['2'])
task4 = self.install_task('task4', goal='goal4', required_data=['1', '2', '3'])
self.create_context(for_task_types=task1+task2+task3+task4)
self.engine.attempt(self._context, self.as_goals('goal4'))
self.assert_actions('task1', 'task2', 'task3', 'task4')
def test_lifecycle_ordering_install_order_invariant(self):
# Here we swap the order of goal3 and goal4 task installation relative to
# `test_lifecycle_ordering` above. We can't swap task1 and task2, since they
# purposefully have an implicit ordering dependency via a dep inside the same goal.
task1 = self.install_task('task1', goal='goal1', product_types=['1'])
task2 = self.install_task('task2', goal='goal1', product_types=['2'], required_data=['1'])
task3 = self.install_task('task4', goal='goal4', required_data=['1', '2', '3'])
task4 = self.install_task('task3', goal='goal3', product_types=['3'], required_data=['2'])
self.create_context(for_task_types=task1+task2+task3+task4)
self.engine.attempt(self._context, self.as_goals('goal4'))
self.assert_actions('task1', 'task2', 'task3', 'task4')
def test_inter_goal_dep(self):
task1 = self.install_task('task1', goal='goal1', product_types=['1'])
task2 = self.install_task('task2', goal='goal1', required_data=['1'])
self.create_context(for_task_types=task1+task2)
self.engine.attempt(self._context, self.as_goals('goal1'))
self.assert_actions('task1', 'task2')
def test_inter_goal_dep_self_cycle_ok(self):
task = self.install_task('task1', goal='goal1', product_types=['1'],
required_data=['1'])
self.create_context(for_task_types=task)
self.engine.attempt(self._context, self.as_goals('goal1'))
self.assert_actions('task1')
def test_inter_goal_dep_downstream(self):
task1 = self.install_task('task1', goal='goal1', required_data=['1'])
task2 = self.install_task('task2', goal='goal1', product_types=['1'])
self.create_context(for_task_types=task1+task2)
with self.assertRaises(self.engine.TaskOrderError):
self.engine.attempt(self._context, self.as_goals('goal1'))
def test_missing_product(self):
task = self.install_task('task1', goal='goal1', required_data=['1'])
self.create_context(for_task_types=task)
with self.assertRaises(self.engine.MissingProductError):
self.engine.attempt(self._context, self.as_goals('goal1'))
def test_goal_cycle_direct(self):
task1 = self.install_task('task1', goal='goal1', required_data=['2'], product_types=['1'])
task2 = self.install_task('task2', goal='goal2', required_data=['1'], product_types=['2'])
self.create_context(for_task_types=task1+task2)
for goal in ('goal1', 'goal2'):
with self.assertRaises(self.engine.GoalCycleError):
self.engine.attempt(self._context, self.as_goals(goal))
def test_goal_cycle_indirect(self):
task1 = self.install_task('task1', goal='goal1', required_data=['2'], product_types=['1'])
task2 = self.install_task('task2', goal='goal2', required_data=['3'], product_types=['2'])
task3 = self.install_task('task3', goal='goal3', required_data=['1'], product_types=['3'])
self.create_context(for_task_types=task1+task2+task3)
for goal in ('goal1', 'goal2', 'goal3'):
with self.assertRaises(self.engine.GoalCycleError):
self.engine.attempt(self._context, self.as_goals(goal))
def test_goal_ordering_unconstrained_respects_cli_order(self):
task1 = self.install_task('task1', goal='goal1')
task2 = self.install_task('task2', goal='goal2')
task3 = self.install_task('task3', goal='goal3')
self.create_context(for_task_types=task1+task2+task3)
for permutation in itertools.permutations([('task1', 'goal1'),
('task2', 'goal2'),
('task3', 'goal3')]):
self.actions = []
self.engine.attempt(self._context, self.as_goals(*[goal for task, goal in permutation]))
expected_execute_actions = [task for task, goal in permutation]
self.assert_actions(*expected_execute_actions)
def test_goal_ordering_constrained_conflicts_cli_order(self):
task1 = self.install_task('task1', goal='goal1', required_data=['2'])
task2 = self.install_task('task2', goal='goal2', product_types=['2'])
self.create_context(for_task_types=task1+task2)
self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
self.assert_actions('task2', 'task1')
def test_goal_ordering_mixed_constraints_and_cli_order(self):
task1 = self.install_task('task1', goal='goal1')
task2 = self.install_task('task2', goal='goal2')
task3 = self.install_task('task3', goal='goal3')
task4 = self.install_task('task4', goal='goal4', required_data=['5'])
task5 = self.install_task('task5', goal='goal5', product_types=['5'])
self.create_context(for_task_types=task1+task2+task3+task4+task5)
self.engine.attempt(self._context,
self.as_goals('goal1', 'goal2', 'goal4', 'goal5', 'goal3'))
self.assert_actions('task1', 'task2', 'task5', 'task4', 'task3')
def test_cli_goals_deduped(self):
task1 = self.install_task('task1', goal='goal1')
task2 = self.install_task('task2', goal='goal2')
task3 = self.install_task('task3', goal='goal3')
self.create_context(for_task_types=task1+task2+task3)
self.engine.attempt(self._context,
self.as_goals('goal1', 'goal2', 'goal1', 'goal3', 'goal2'))
self.assert_actions('task1', 'task2', 'task3')
def test_replace_target_roots(self):
task1 = self.install_task('task1', goal='goal1')
task2 = self.install_task('task2', goal='goal2', alternate_target_roots=[42])
self.create_context(for_task_types=task1+task2)
self.assertEquals([], self._context.target_roots)
self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
self.assertEquals([42], self._context.target_roots)
def test_replace_target_roots_conflict(self):
task1 = self.install_task('task1', goal='goal1', alternate_target_roots=[42])
task2 = self.install_task('task2', goal='goal2', alternate_target_roots=[1, 2])
self.create_context(for_task_types=task1+task2)
with self.assertRaises(self.engine.TargetRootsReplacement.ConflictingProposalsError):
self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
def test_replace_target_roots_to_empty_list(self):
task1 = self.install_task('task1', goal='goal1')
task2 = self.install_task('task2', goal='goal2', alternate_target_roots=[])
target = self.make_target('t')
self.create_context(for_task_types=task1+task2, target_roots=[target])
self.engine.attempt(self._context, self.as_goals('goal1', 'goal2'))
self.assertEquals([], self._context.target_roots)
| apache-2.0 |
morelab/appcomposer | alembic/versions/2ef3688b5383_add_translationexternalsuggestions.py | 3 | 1733 | """Add TranslationExternalSuggestions
Revision ID: 2ef3688b5383
Revises: 20860ffde766
Create Date: 2015-04-19 12:43:34.752894
"""
# revision identifiers, used by Alembic.
revision = '2ef3688b5383'
down_revision = '20860ffde766'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('TranslationExternalSuggestions',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('engine', sa.Unicode(length=20), nullable=True),
sa.Column('human_key', sa.Unicode(length=255), nullable=True),
sa.Column('language', sa.Unicode(length=255), nullable=True),
sa.Column('value', sa.UnicodeText(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('engine', 'human_key', 'language')
)
op.create_index(u'ix_TranslationExternalSuggestions_engine', 'TranslationExternalSuggestions', ['engine'], unique=False)
op.create_index(u'ix_TranslationExternalSuggestions_human_key', 'TranslationExternalSuggestions', ['human_key'], unique=False)
op.create_index(u'ix_TranslationExternalSuggestions_language', 'TranslationExternalSuggestions', ['language'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(u'ix_TranslationExternalSuggestions_language', table_name='TranslationExternalSuggestions')
op.drop_index(u'ix_TranslationExternalSuggestions_human_key', table_name='TranslationExternalSuggestions')
op.drop_index(u'ix_TranslationExternalSuggestions_engine', table_name='TranslationExternalSuggestions')
op.drop_table('TranslationExternalSuggestions')
### end Alembic commands ###
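# Typical invocation (illustrative; the exact commands depend on the
# project's alembic.ini configuration):
#   alembic upgrade 2ef3688b5383     # apply this revision
#   alembic downgrade 20860ffde766   # revert to the previous revision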
| bsd-2-clause |
wangyum/tensorflow | tensorflow/python/kernel_tests/cast_op_test.py | 62 | 8621 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.tf.cast."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class CastOpTest(test.TestCase):
def _toDataType(self, dtype):
"""Returns TensorFlow data type for numpy type."""
if dtype == np.float32:
return dtypes.float32
elif dtype == np.float64:
return dtypes.float64
elif dtype == np.int32:
return dtypes.int32
elif dtype == np.int64:
return dtypes.int64
elif dtype == np.bool:
return dtypes.bool
elif dtype == np.complex64:
return dtypes.complex64
elif dtype == np.complex128:
return dtypes.complex128
else:
return None
def _cast(self, x, dtype, use_gpu=False):
with self.test_session(use_gpu=use_gpu):
val = constant_op.constant(x, self._toDataType(np.array([x]).dtype))
return math_ops.cast(val, self._toDataType(dtype), name="cast").eval()
def _test(self, x, dtype, use_gpu=False):
"""Tests cast(x) to dtype behaves the same as numpy.astype."""
np_ans = x.astype(dtype)
tf_ans = self._cast(x, dtype, use_gpu)
self.assertAllEqual(np_ans, tf_ans)
def _testTypes(self, x, use_gpu=False):
"""Tests cast(x) to different tf."""
if use_gpu:
type_list = [
np.float32, np.float64, np.int64, np.complex64, np.complex128
]
else:
type_list = [
np.float32, np.float64, np.int32, np.int64, np.complex64,
np.complex128
]
for from_type in type_list:
for to_type in type_list:
self._test(x.astype(from_type), to_type, use_gpu)
self._test(x.astype(np.bool), np.float32, use_gpu)
self._test(x.astype(np.uint8), np.float32, use_gpu)
if not use_gpu:
self._test(x.astype(np.bool), np.int32, use_gpu)
self._test(x.astype(np.int32), np.int32, use_gpu)
def _testAll(self, x):
self._testTypes(x, use_gpu=False)
if x.dtype == np.float32 or x.dtype == np.float64:
self._testTypes(x, use_gpu=True)
def testBasic(self):
self._testAll(np.arange(-10, 10).reshape(2, 10))
self._testAll(np.linspace(-10, 10, 17))
def testSmallValues(self):
f4 = np.finfo(np.float32)
f8 = np.finfo(np.float64)
self._testAll(
np.array([
0, -1, 1, -f4.resolution, f4.resolution, f8.resolution,
-f8.resolution
]))
def testBfloat16(self):
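    # bfloat16 keeps only 8 significand bits, so the float32 round-trip below
    # is lossy; rtol=1/128 (2**-7) comfortably bounds that rounding error.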
a = np.random.uniform(-100, 100, 100).astype(np.float32)
with self.test_session(use_gpu=False):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
with self.test_session(use_gpu=True):
b = math_ops.cast(math_ops.cast(a, dtypes.bfloat16), dtypes.float32)
self.assertAllClose(a, b.eval(), rtol=1 / 128.)
def testRandom(self):
self._testAll(np.random.normal(0, 10, 210).reshape([2, 3, 5, 7]))
self._testAll(np.random.normal(0, 1e6, 210).reshape([2, 3, 5, 7]))
  # Special values like int32max, int64min, inf, -inf, nan are cast to
  # integer values in somewhat unexpected ways, and they behave
  # differently on CPU and GPU.
def _compare(self, x, dst_dtype, expected, use_gpu=False):
np.testing.assert_equal(
self._cast(
x, dst_dtype, use_gpu=use_gpu), dst_dtype(expected))
def testIntToFloatBoundary(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(i4.min, np.float32, i4.min, False)
self._compare(i4.max, np.float32, i4.max, False)
self._compare(i8.min, np.float32, i8.min, False)
self._compare(i8.max, np.float32, i8.max, False)
self._compare(i4.min, np.float64, i4.min, False)
self._compare(i4.max, np.float64, i4.max, False)
self._compare(i8.min, np.float64, i8.min, False)
self._compare(i8.max, np.float64, i8.max, False)
# NOTE: GPU does not support int32/int64 for casting.
def testInfNan(self):
i4 = np.iinfo(np.int32)
i8 = np.iinfo(np.int64)
self._compare(np.inf, np.float32, np.inf, False)
self._compare(np.inf, np.float64, np.inf, False)
if sys.byteorder == "big":
self._compare(np.inf, np.int32, i4.max, False)
self._compare(np.inf, np.int64, i8.max, False)
else:
self._compare(np.inf, np.int32, i4.min, False)
self._compare(np.inf, np.int64, i8.min, False)
self._compare(-np.inf, np.float32, -np.inf, False)
self._compare(-np.inf, np.float64, -np.inf, False)
self._compare(-np.inf, np.int32, i4.min, False)
self._compare(-np.inf, np.int64, i8.min, False)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, False)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, False)), True)
self._compare(np.nan, np.int32, i4.min, False)
self._compare(np.nan, np.int64, i8.min, False)
self._compare(np.inf, np.float32, np.inf, True)
self._compare(np.inf, np.float64, np.inf, True)
self._compare(-np.inf, np.float32, -np.inf, True)
self._compare(-np.inf, np.float64, -np.inf, True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float32, True)), True)
self.assertAllEqual(np.isnan(self._cast(np.nan, np.float64, True)), True)
def _OpError(self, x, dtype, err):
with self.test_session():
with self.assertRaisesOpError(err):
math_ops.cast(x, dtype).eval()
def testNotImplemented(self):
self._OpError(np.arange(0, 10), dtypes.string, "Cast.*int64.*string.*")
def testCastToTypeOfVariable(self):
with self.test_session() as sess:
x = variables.Variable(5, dtype=dtypes.float32)
y = variables.Variable(True, dtype=dtypes.bool)
cast = math_ops.cast(y, x.dtype)
variables.global_variables_initializer().run()
self.assertEqual(1.0, sess.run(cast))
def testGradients(self):
t = [dtypes.float32, dtypes.float64, dtypes.complex64, dtypes.complex128]
for src_t in t:
for dst_t in t:
with self.test_session():
x = constant_op.constant(1.0, src_t)
z = array_ops.identity(x)
y = math_ops.cast(z, dst_t)
err = gradient_checker.compute_gradient_error(x, [], y, [])
self.assertLess(err, 1e-3)
class SparseTensorCastTest(test.TestCase):
def testCast(self):
indices = constant_op.constant([[0], [1], [2]], dtypes.int64)
values = constant_op.constant(np.array([1, 2, 3], np.int64))
shape = constant_op.constant([3], dtypes.int64)
st = sparse_tensor.SparseTensor(indices, values, shape)
st_cast = math_ops.cast(st, dtypes.float32)
with self.test_session():
self.assertAllEqual(st_cast.indices.eval(), [[0], [1], [2]])
self.assertAllEqual(st_cast.values.eval(),
np.array([1, 2, 3], np.float32))
self.assertAllEqual(st_cast.dense_shape.eval(), [3])
class SaturateCastTest(test.TestCase):
def testSaturate(self):
in_types = dtypes.float32,
out_types = dtypes.int8, dtypes.uint8, dtypes.int16, dtypes.float32
with self.test_session() as sess:
for in_type in in_types:
for out_type in out_types:
lo, hi = in_type.min, in_type.max
x = constant_op.constant(
[lo, lo + 1, lo // 2, hi // 2, hi - 1, hi], dtype=in_type)
y = math_ops.saturate_cast(x, dtype=out_type)
self.assertEqual(y.dtype, out_type)
x, y = sess.run([x, y])
correct = np.maximum(out_type.min, np.minimum(out_type.max, x))
self.assertAllEqual(correct, y)
if __name__ == "__main__":
test.main()
| apache-2.0 |
qusp/orange3 | Orange/canvas/document/tests/test_quickmenu.py | 10 | 2542 | from PyQt4.QtGui import QStringListModel
from PyQt4.QtCore import QPoint
from ..quickmenu import QuickMenu, SuggestMenuPage, FlattenedTreeItemModel, \
MenuPage, QAction
from ...gui.test import QAppTestCase
from ...registry import global_registry
from ...registry.qt import QtWidgetRegistry
class TestMenu(QAppTestCase):
def test_menu(self):
menu = QuickMenu()
def triggered(action):
print("Triggered", action.text())
def hovered(action):
print("Hover", action.text())
menu.triggered.connect(triggered)
menu.hovered.connect(hovered)
items_page = MenuPage()
model = QStringListModel(["one", "two", "file not found"])
items_page.setModel(model)
menu.addPage("w", items_page)
page_c = MenuPage()
menu.addPage("c", page_c)
menu.popup(QPoint(200, 200))
menu.activateWindow()
self.app.exec_()
def test_menu_with_registry(self):
registry = QtWidgetRegistry(global_registry())
menu = QuickMenu()
menu.setModel(registry.model())
triggered_action = []
def triggered(action):
print("Triggered", action.text())
self.assertIsInstance(action, QAction)
triggered_action.append(action)
def hovered(action):
self.assertIsInstance(action, QAction)
print("Hover", action.text())
menu.triggered.connect(triggered)
menu.hovered.connect(hovered)
self.app.setActiveWindow(menu)
rval = menu.exec_(QPoint(200, 200))
if triggered_action:
self.assertIs(triggered_action[0], rval)
def test_search(self):
registry = QtWidgetRegistry(global_registry())
menu = SuggestMenuPage()
menu.setModel(registry.model())
menu.show()
menu.setFilterFixedString("la")
self.singleShot(2500, lambda: menu.setFilterFixedString("ba"))
self.singleShot(5000, lambda: menu.setFilterFixedString("ab"))
self.app.exec_()
def test_flattened_model(self):
model = QStringListModel(["0", "1", "2", "3"])
flat = FlattenedTreeItemModel()
flat.setSourceModel(model)
def get(row):
return flat.index(row, 0).data()
self.assertEqual(get(0), "0")
self.assertEqual(get(1), "1")
self.assertEqual(get(3), "3")
self.assertEqual(flat.rowCount(), model.rowCount())
self.assertEqual(flat.columnCount(), 1)
| bsd-2-clause |
kazemakase/scikit-learn | sklearn/feature_extraction/text.py | 24 | 50103 | # -*- coding: utf-8 -*-
# Authors: Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Robert Layton <robertlayton@gmail.com>
# Jochen Wersdörfer <jochen@wersdoerfer.de>
# Roman Sinayev <roman.sinayev@gmail.com>
#
# License: BSD 3 clause
"""
The :mod:`sklearn.feature_extraction.text` submodule gathers utilities to
build feature vectors from text documents.
"""
from __future__ import unicode_literals
import array
from collections import Mapping, defaultdict
import numbers
from operator import itemgetter
import re
import unicodedata
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..externals.six.moves import xrange
from ..preprocessing import normalize
from .hashing import FeatureHasher
from .stop_words import ENGLISH_STOP_WORDS
from ..utils import deprecated
from ..utils.fixes import frombuffer_empty, bincount
from ..utils.validation import check_is_fitted
__all__ = ['CountVectorizer',
'ENGLISH_STOP_WORDS',
'TfidfTransformer',
'TfidfVectorizer',
'strip_accents_ascii',
'strip_accents_unicode',
'strip_tags']
def strip_accents_unicode(s):
"""Transform accentuated unicode symbols into their simple counterpart
Warning: the python-level loop and join operations make this
implementation 20 times slower than the strip_accents_ascii basic
normalization.
See also
--------
strip_accents_ascii
Remove accentuated char for any unicode symbol that has a direct
ASCII equivalent.
"""
return ''.join([c for c in unicodedata.normalize('NFKD', s)
if not unicodedata.combining(c)])
def strip_accents_ascii(s):
"""Transform accentuated unicode symbols into ascii or nothing
Warning: this solution is only suited for languages that have a direct
transliteration to ASCII symbols.
See also
--------
strip_accents_unicode
Remove accentuated char for any unicode symbol.
"""
nkfd_form = unicodedata.normalize('NFKD', s)
return nkfd_form.encode('ASCII', 'ignore').decode('ASCII')
def strip_tags(s):
"""Basic regexp based HTML / XML tag stripper function
For serious HTML/XML preprocessing you should rather use an external
library such as lxml or BeautifulSoup.
"""
return re.compile(r"<([^>]+)>", flags=re.UNICODE).sub(" ", s)
def _check_stop_list(stop):
if stop == "english":
return ENGLISH_STOP_WORDS
elif isinstance(stop, six.string_types):
raise ValueError("not a built-in stop list: %s" % stop)
else: # assume it's a collection
return stop
class VectorizerMixin(object):
"""Provides common code for text vectorizers (tokenization logic)."""
_white_spaces = re.compile(r"\s\s+")
def decode(self, doc):
"""Decode the input into a string of unicode symbols
The decoding strategy depends on the vectorizer parameters.
"""
if self.input == 'filename':
with open(doc, 'rb') as fh:
doc = fh.read()
elif self.input == 'file':
doc = doc.read()
if isinstance(doc, bytes):
doc = doc.decode(self.encoding, self.decode_error)
if doc is np.nan:
raise ValueError("np.nan is an invalid document, expected byte or "
"unicode string.")
return doc
def _word_ngrams(self, tokens, stop_words=None):
"""Turn tokens into a sequence of n-grams after stop words filtering"""
# handle stop words
if stop_words is not None:
tokens = [w for w in tokens if w not in stop_words]
# handle token n-grams
min_n, max_n = self.ngram_range
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in xrange(min_n,
min(max_n + 1, n_original_tokens + 1)):
for i in xrange(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
def _char_ngrams(self, text_document):
"""Tokenize text_document into a sequence of character n-grams"""
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
text_len = len(text_document)
ngrams = []
min_n, max_n = self.ngram_range
for n in xrange(min_n, min(max_n + 1, text_len + 1)):
for i in xrange(text_len - n + 1):
ngrams.append(text_document[i: i + n])
return ngrams
def _char_wb_ngrams(self, text_document):
"""Whitespace sensitive char-n-gram tokenization.
Tokenize text_document into a sequence of character n-grams
excluding any whitespace (operating only inside word boundaries)"""
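        # Worked example: with ngram_range=(3, 3), the word "fox" is padded
        # to " fox " and yields [" fo", "fox", "ox "]; n-grams never cross
        # the padded word boundary.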
# normalize white spaces
text_document = self._white_spaces.sub(" ", text_document)
min_n, max_n = self.ngram_range
ngrams = []
for w in text_document.split():
w = ' ' + w + ' '
w_len = len(w)
for n in xrange(min_n, max_n + 1):
offset = 0
ngrams.append(w[offset:offset + n])
while offset + n < w_len:
offset += 1
ngrams.append(w[offset:offset + n])
if offset == 0: # count a short word (w_len < n) only once
break
return ngrams
def build_preprocessor(self):
"""Return a function to preprocess the text before tokenization"""
if self.preprocessor is not None:
return self.preprocessor
# unfortunately python functools package does not have an efficient
# `compose` function that would have allowed us to chain a dynamic
        # number of functions. However, the cost of a lambda call is a few
        # hundred nanoseconds, which is negligible when compared to the
# cost of tokenizing a string of 1000 chars for instance.
noop = lambda x: x
# accent stripping
if not self.strip_accents:
strip_accents = noop
elif callable(self.strip_accents):
strip_accents = self.strip_accents
elif self.strip_accents == 'ascii':
strip_accents = strip_accents_ascii
elif self.strip_accents == 'unicode':
strip_accents = strip_accents_unicode
else:
raise ValueError('Invalid value for "strip_accents": %s' %
self.strip_accents)
if self.lowercase:
return lambda x: strip_accents(x.lower())
else:
return strip_accents
def build_tokenizer(self):
"""Return a function that splits a string into a sequence of tokens"""
if self.tokenizer is not None:
return self.tokenizer
token_pattern = re.compile(self.token_pattern)
return lambda doc: token_pattern.findall(doc)
def get_stop_words(self):
"""Build or fetch the effective stop words list"""
return _check_stop_list(self.stop_words)
def build_analyzer(self):
"""Return a callable that handles preprocessing and tokenization"""
if callable(self.analyzer):
return self.analyzer
preprocess = self.build_preprocessor()
if self.analyzer == 'char':
return lambda doc: self._char_ngrams(preprocess(self.decode(doc)))
elif self.analyzer == 'char_wb':
return lambda doc: self._char_wb_ngrams(
preprocess(self.decode(doc)))
elif self.analyzer == 'word':
stop_words = self.get_stop_words()
tokenize = self.build_tokenizer()
return lambda doc: self._word_ngrams(
tokenize(preprocess(self.decode(doc))), stop_words)
else:
raise ValueError('%s is not a valid tokenization scheme/analyzer' %
self.analyzer)
def _validate_vocabulary(self):
vocabulary = self.vocabulary
if vocabulary is not None:
if not isinstance(vocabulary, Mapping):
vocab = {}
for i, t in enumerate(vocabulary):
if vocab.setdefault(t, i) != i:
msg = "Duplicate term in vocabulary: %r" % t
raise ValueError(msg)
vocabulary = vocab
else:
indices = set(six.itervalues(vocabulary))
if len(indices) != len(vocabulary):
raise ValueError("Vocabulary contains repeated indices.")
for i in xrange(len(vocabulary)):
if i not in indices:
msg = ("Vocabulary of size %d doesn't contain index "
"%d." % (len(vocabulary), i))
raise ValueError(msg)
if not vocabulary:
raise ValueError("empty vocabulary passed to fit")
self.fixed_vocabulary_ = True
self.vocabulary_ = dict(vocabulary)
else:
self.fixed_vocabulary_ = False
def _check_vocabulary(self):
"""Check if vocabulary is empty or missing (not fit-ed)"""
msg = "%(name)s - Vocabulary wasn't fitted."
        check_is_fitted(self, 'vocabulary_', msg=msg)
if len(self.vocabulary_) == 0:
raise ValueError("Vocabulary is empty")
@property
@deprecated("The `fixed_vocabulary` attribute is deprecated and will be "
"removed in 0.18. Please use `fixed_vocabulary_` instead.")
def fixed_vocabulary(self):
return self.fixed_vocabulary_
class HashingVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token occurrences
It turns a collection of text documents into a scipy.sparse matrix holding
token occurrence counts (or binary occurrence information), possibly
normalized as token frequencies if norm='l1' or projected on the euclidean
unit sphere if norm='l2'.
This text vectorizer implementation uses the hashing trick to find the
token string name to feature integer index mapping.
This strategy has several advantages:
    - it is very memory efficient and scales to large datasets, as there is
      no need to store a vocabulary dictionary in memory
- it is fast to pickle and un-pickle as it holds no state besides the
constructor parameters
- it can be used in a streaming (partial fit) or parallel pipeline as there
is no state computed during fit.
There are also a couple of cons (vs using a CountVectorizer with an
in-memory vocabulary):
- there is no way to compute the inverse transform (from feature indices to
string feature names) which can be a problem when trying to introspect
which features are most important to a model.
- there can be collisions: distinct tokens can be mapped to the same
feature index. However in practice this is rarely an issue if n_features
is large enough (e.g. 2 ** 18 for text classification problems).
- no IDF weighting as this would render the transformer stateful.
The hash function employed is the signed 32-bit version of Murmurhash3.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be analyzed directly (byte or unicode strings).
encoding : string, default='utf-8'
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n), default=(1, 1)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
lowercase : boolean, default=True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
n_features : integer, default=(2 ** 20)
The number of features (columns) in the output matrices. Small numbers
of features are likely to cause hash collisions, but large numbers
will cause larger coefficient dimensions in linear learners.
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
    binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
    dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
non_negative : boolean, default=False
Whether output matrices should contain non-negative values only;
effectively calls abs on the matrix prior to returning it.
When True, output values can be interpreted as frequencies.
When False, output values will have expected value zero.
See also
--------
CountVectorizer, TfidfVectorizer
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word', n_features=(2 ** 20),
binary=False, norm='l2', non_negative=False,
dtype=np.float64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.n_features = n_features
self.ngram_range = ngram_range
self.binary = binary
self.norm = norm
self.non_negative = non_negative
self.dtype = dtype
def partial_fit(self, X, y=None):
"""Does nothing: this transformer is stateless.
This method is just there to mark the fact that this transformer
can work in a streaming setup.
"""
return self
def fit(self, X, y=None):
"""Does nothing: this transformer is stateless."""
# triggers a parameter validation
self._get_hasher().fit(X, y=y)
return self
def transform(self, X, y=None):
"""Transform a sequence of documents to a document-term matrix.
Parameters
----------
X : iterable over raw text documents, length = n_samples
Samples. Each sample must be a text document (either bytes or
unicode strings, file name or file object depending on the
constructor argument) which will be tokenized and hashed.
y : (ignored)
Returns
-------
X : scipy.sparse matrix, shape = (n_samples, self.n_features)
Document-term matrix.
"""
analyzer = self.build_analyzer()
X = self._get_hasher().transform(analyzer(doc) for doc in X)
if self.binary:
X.data.fill(1)
if self.norm is not None:
X = normalize(X, norm=self.norm, copy=False)
return X
# Alias transform to fit_transform for convenience
fit_transform = transform
def _get_hasher(self):
return FeatureHasher(n_features=self.n_features,
input_type='string', dtype=self.dtype,
non_negative=self.non_negative)
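# A minimal usage sketch of HashingVectorizer (illustrative; the corpus and
# the helper name `_demo_hashing_vectorizer` are hypothetical, not part of
# the library). Because the vectorizer is stateless, documents can be
# transformed without a prior fit.
def _demo_hashing_vectorizer():
    corpus = ["the quick brown fox", "jumped over the lazy dog"]
    vectorizer = HashingVectorizer(n_features=2 ** 8, norm='l2')
    # Returns a sparse matrix of shape (2, 256) with l2-normalized rows.
    return vectorizer.transform(corpus)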
def _document_frequency(X):
"""Count the number of non-zero values for each feature in sparse X."""
if sp.isspmatrix_csr(X):
return bincount(X.indices, minlength=X.shape[1])
else:
return np.diff(sp.csc_matrix(X, copy=False).indptr)
class CountVectorizer(BaseEstimator, VectorizerMixin):
"""Convert a collection of text documents to a matrix of token counts
This implementation produces a sparse representation of the counts using
scipy.sparse.coo_matrix.
If you do not provide an a-priori dictionary and you do not use an analyzer
that does some kind of feature selection then the number of features will
be equal to the vocabulary size found by analyzing the data.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be analyzed directly (byte or unicode strings).
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char', 'char_wb'} or callable
Whether the feature should be made of word or character n-grams.
Option 'char_wb' creates character n-grams only from text inside
word boundaries.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
Only applies if ``analyzer == 'word'``.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If 'english', a built-in stop word list for English is used.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, True by default
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
        if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents. Indices
in the mapping should not be repeated and should not have any gap
between 0 and the largest index.
binary : boolean, default=False
If True, all non zero counts are set to 1. This is useful for discrete
probabilistic models that model binary events rather than integer
counts.
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
Attributes
----------
vocabulary_ : dict
A mapping of terms to feature indices.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
HashingVectorizer, TfidfVectorizer
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None,
lowercase=True, preprocessor=None, tokenizer=None,
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), analyzer='word',
max_df=1.0, min_df=1, max_features=None,
vocabulary=None, binary=False, dtype=np.int64):
self.input = input
self.encoding = encoding
self.decode_error = decode_error
self.strip_accents = strip_accents
self.preprocessor = preprocessor
self.tokenizer = tokenizer
self.analyzer = analyzer
self.lowercase = lowercase
self.token_pattern = token_pattern
self.stop_words = stop_words
self.max_df = max_df
self.min_df = min_df
if max_df < 0 or min_df < 0:
raise ValueError("negative value for max_df of min_df")
self.max_features = max_features
if max_features is not None:
if (not isinstance(max_features, numbers.Integral) or
max_features <= 0):
raise ValueError(
"max_features=%r, neither a positive integer nor None"
% max_features)
self.ngram_range = ngram_range
self.vocabulary = vocabulary
self.binary = binary
self.dtype = dtype
def _sort_features(self, X, vocabulary):
"""Sort features by name
Returns a reordered matrix and modifies the vocabulary in place
"""
sorted_features = sorted(six.iteritems(vocabulary))
map_index = np.empty(len(sorted_features), dtype=np.int32)
for new_val, (term, old_val) in enumerate(sorted_features):
map_index[new_val] = old_val
vocabulary[term] = new_val
return X[:, map_index]
def _limit_features(self, X, vocabulary, high=None, low=None,
limit=None):
"""Remove too rare or too common features.
        Prune features that are non zero in more documents than high or in
        fewer documents than low, modifying the vocabulary and restricting
        it to at most the limit most frequent features.
This does not prune samples with zero features.
"""
if high is None and low is None and limit is None:
return X, set()
# Calculate a mask based on document frequencies
dfs = _document_frequency(X)
tfs = np.asarray(X.sum(axis=0)).ravel()
mask = np.ones(len(dfs), dtype=bool)
if high is not None:
mask &= dfs <= high
if low is not None:
mask &= dfs >= low
if limit is not None and mask.sum() > limit:
mask_inds = (-tfs[mask]).argsort()[:limit]
new_mask = np.zeros(len(dfs), dtype=bool)
new_mask[np.where(mask)[0][mask_inds]] = True
mask = new_mask
new_indices = np.cumsum(mask) - 1 # maps old indices to new
removed_terms = set()
for term, old_index in list(six.iteritems(vocabulary)):
if mask[old_index]:
vocabulary[term] = new_indices[old_index]
else:
del vocabulary[term]
removed_terms.add(term)
kept_indices = np.where(mask)[0]
if len(kept_indices) == 0:
raise ValueError("After pruning, no terms remain. Try a lower"
" min_df or a higher max_df.")
return X[:, kept_indices], removed_terms
def _count_vocab(self, raw_documents, fixed_vocab):
"""Create sparse feature matrix, and vocabulary where fixed_vocab=False
"""
if fixed_vocab:
vocabulary = self.vocabulary_
else:
# Add a new value when a new vocabulary item is seen
vocabulary = defaultdict()
vocabulary.default_factory = vocabulary.__len__
analyze = self.build_analyzer()
j_indices = _make_int_array()
indptr = _make_int_array()
indptr.append(0)
for doc in raw_documents:
for feature in analyze(doc):
try:
j_indices.append(vocabulary[feature])
except KeyError:
# Ignore out-of-vocabulary items for fixed_vocab=True
continue
indptr.append(len(j_indices))
if not fixed_vocab:
# disable defaultdict behaviour
vocabulary = dict(vocabulary)
if not vocabulary:
raise ValueError("empty vocabulary; perhaps the documents only"
" contain stop words")
j_indices = frombuffer_empty(j_indices, dtype=np.intc)
indptr = np.frombuffer(indptr, dtype=np.intc)
values = np.ones(len(j_indices))
X = sp.csr_matrix((values, j_indices, indptr),
shape=(len(indptr) - 1, len(vocabulary)),
dtype=self.dtype)
X.sum_duplicates()
return vocabulary, X
def fit(self, raw_documents, y=None):
"""Learn a vocabulary dictionary of all tokens in the raw documents.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
self
"""
self.fit_transform(raw_documents)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
# We intentionally don't call the transform method to make
# fit_transform overridable without unwanted side effects in
# TfidfVectorizer.
self._validate_vocabulary()
max_df = self.max_df
min_df = self.min_df
max_features = self.max_features
vocabulary, X = self._count_vocab(raw_documents,
self.fixed_vocabulary_)
if self.binary:
X.data.fill(1)
if not self.fixed_vocabulary_:
X = self._sort_features(X, vocabulary)
n_doc = X.shape[0]
max_doc_count = (max_df
if isinstance(max_df, numbers.Integral)
else max_df * n_doc)
min_doc_count = (min_df
if isinstance(min_df, numbers.Integral)
else min_df * n_doc)
if max_doc_count < min_doc_count:
raise ValueError(
"max_df corresponds to < documents than min_df")
X, self.stop_words_ = self._limit_features(X, vocabulary,
max_doc_count,
min_doc_count,
max_features)
self.vocabulary_ = vocabulary
return X
def transform(self, raw_documents):
"""Transform documents to document-term matrix.
Extract token counts out of raw text documents using the vocabulary
fitted with fit or the one provided to the constructor.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Document-term matrix.
"""
if not hasattr(self, 'vocabulary_'):
self._validate_vocabulary()
self._check_vocabulary()
# use the same matrix-building strategy as fit_transform
_, X = self._count_vocab(raw_documents, fixed_vocab=True)
if self.binary:
X.data.fill(1)
return X
def inverse_transform(self, X):
"""Return terms per document with nonzero entries in X.
Parameters
----------
X : {array, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
X_inv : list of arrays, len = n_samples
List of arrays of terms.
"""
self._check_vocabulary()
if sp.issparse(X):
# We need CSR format for fast row manipulations.
X = X.tocsr()
else:
# We need to convert X to a matrix, so that the indexing
# returns 2D objects
X = np.asmatrix(X)
n_samples = X.shape[0]
terms = np.array(list(self.vocabulary_.keys()))
indices = np.array(list(self.vocabulary_.values()))
inverse_vocabulary = terms[np.argsort(indices)]
return [inverse_vocabulary[X[i, :].nonzero()[1]].ravel()
for i in range(n_samples)]
def get_feature_names(self):
"""Array mapping from feature integer indices to feature name"""
self._check_vocabulary()
return [t for t, i in sorted(six.iteritems(self.vocabulary_),
key=itemgetter(1))]
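# A minimal usage sketch of CountVectorizer (illustrative; the corpus and the
# helper name `_demo_count_vectorizer` are hypothetical, not part of the
# library).
def _demo_count_vectorizer():
    corpus = ["apple banana apple", "banana cherry"]
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(corpus)
    # vocabulary_ is {'apple': 0, 'banana': 1, 'cherry': 2} and X.toarray()
    # is [[2, 1, 0], [0, 1, 1]] -- one row of token counts per document.
    return vectorizer.vocabulary_, X.toarray()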
def _make_int_array():
"""Construct an array.array of a type suitable for scipy.sparse indices."""
return array.array(str("i"))
class TfidfTransformer(BaseEstimator, TransformerMixin):
"""Transform a count matrix to a normalized tf or tf-idf representation
Tf means term-frequency while tf-idf means term-frequency times inverse
document-frequency. This is a common term weighting scheme in information
retrieval, that has also found good use in document classification.
The goal of using tf-idf instead of the raw frequencies of occurrence of a
token in a given document is to scale down the impact of tokens that occur
very frequently in a given corpus and that are hence empirically less
informative than features that occur in a small fraction of the training
corpus.
The actual formula used for tf-idf is tf * (idf + 1) = tf + tf * idf,
instead of tf * idf. The effect of this is that terms with zero idf, i.e.
that occur in all documents of a training set, will not be entirely
ignored. The formulas used to compute tf and idf depend on parameter
settings that correspond to the SMART notation used in IR, as follows:
Tf is "n" (natural) by default, "l" (logarithmic) when sublinear_tf=True.
Idf is "t" when use_idf is given, "n" (none) otherwise.
Normalization is "c" (cosine) when norm='l2', "n" (none) when norm=None.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
References
----------
.. [Yates2011] `R. Baeza-Yates and B. Ribeiro-Neto (2011). Modern
Information Retrieval. Addison Wesley, pp. 68-74.`
.. [MRS2008] `C.D. Manning, P. Raghavan and H. Schuetze (2008).
Introduction to Information Retrieval. Cambridge University
Press, pp. 118-120.`
"""
def __init__(self, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
self.norm = norm
self.use_idf = use_idf
self.smooth_idf = smooth_idf
self.sublinear_tf = sublinear_tf
def fit(self, X, y=None):
"""Learn the idf vector (global term weights)
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
"""
if not sp.issparse(X):
X = sp.csc_matrix(X)
if self.use_idf:
n_samples, n_features = X.shape
df = _document_frequency(X)
# perform idf smoothing if required
df += int(self.smooth_idf)
n_samples += int(self.smooth_idf)
# log+1 instead of log makes sure terms with zero idf don't get
# suppressed entirely.
idf = np.log(float(n_samples) / df) + 1.0
self._idf_diag = sp.spdiags(idf,
diags=0, m=n_features, n=n_features)
return self
def transform(self, X, copy=True):
"""Transform a count matrix to a tf or tf-idf representation
Parameters
----------
X : sparse matrix, [n_samples, n_features]
a matrix of term/token counts
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
vectors : sparse matrix, [n_samples, n_features]
"""
if hasattr(X, 'dtype') and np.issubdtype(X.dtype, np.float):
# preserve float family dtype
X = sp.csr_matrix(X, copy=copy)
else:
# convert counts or binary occurrences to floats
X = sp.csr_matrix(X, dtype=np.float64, copy=copy)
n_samples, n_features = X.shape
if self.sublinear_tf:
np.log(X.data, X.data)
X.data += 1
if self.use_idf:
check_is_fitted(self, '_idf_diag', 'idf vector is not fitted')
expected_n_features = self._idf_diag.shape[0]
if n_features != expected_n_features:
raise ValueError("Input has n_features=%d while the model"
" has been trained with n_features=%d" % (
n_features, expected_n_features))
# *= doesn't work
X = X * self._idf_diag
if self.norm:
X = normalize(X, norm=self.norm, copy=False)
return X
@property
def idf_(self):
if hasattr(self, "_idf_diag"):
return np.ravel(self._idf_diag.sum(axis=0))
else:
return None
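# A worked example of the smoothed idf formula applied in `fit` above
# (illustrative; the helper name `_demo_smoothed_idf` is hypothetical, not
# part of the library).
def _demo_smoothed_idf():
    # With smooth_idf=True, a term occurring in 2 of 3 documents has both
    # its document frequency and the document count incremented by one:
    n_samples, df = 3 + 1, 2 + 1
    return np.log(float(n_samples) / df) + 1.0  # log(4/3) + 1 ~= 1.2877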
class TfidfVectorizer(CountVectorizer):
"""Convert a collection of raw documents to a matrix of TF-IDF features.
Equivalent to CountVectorizer followed by TfidfTransformer.
Read more in the :ref:`User Guide <text_feature_extraction>`.
Parameters
----------
input : string {'filename', 'file', 'content'}
If 'filename', the sequence passed as an argument to fit is
expected to be a list of filenames that need reading to fetch
the raw content to analyze.
If 'file', the sequence items must have a 'read' method (file-like
object) that is called to fetch the bytes in memory.
        Otherwise the input is expected to be a sequence of items that can
        be analyzed directly (byte or unicode strings).
encoding : string, 'utf-8' by default.
If bytes or files are given to analyze, this encoding is used to
decode.
decode_error : {'strict', 'ignore', 'replace'}
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. By default, it is
'strict', meaning that a UnicodeDecodeError will be raised. Other
values are 'ignore' and 'replace'.
strip_accents : {'ascii', 'unicode', None}
Remove accents during the preprocessing step.
'ascii' is a fast method that only works on characters that have
        a direct ASCII mapping.
'unicode' is a slightly slower method that works on any characters.
None (default) does nothing.
analyzer : string, {'word', 'char'} or callable
Whether the feature should be made of word or character n-grams.
If a callable is passed it is used to extract the sequence of features
out of the raw, unprocessed input.
preprocessor : callable or None (default)
Override the preprocessing (string transformation) stage while
preserving the tokenizing and n-grams generation steps.
tokenizer : callable or None (default)
Override the string tokenization step while preserving the
preprocessing and n-grams generation steps.
Only applies if ``analyzer == 'word'``.
ngram_range : tuple (min_n, max_n)
The lower and upper boundary of the range of n-values for different
n-grams to be extracted. All values of n such that min_n <= n <= max_n
will be used.
stop_words : string {'english'}, list, or None (default)
If a string, it is passed to _check_stop_list and the appropriate stop
list is returned. 'english' is currently the only supported string
value.
If a list, that list is assumed to contain stop words, all of which
will be removed from the resulting tokens.
Only applies if ``analyzer == 'word'``.
If None, no stop words will be used. max_df can be set to a value
in the range [0.7, 1.0) to automatically detect and filter stop
words based on intra corpus document frequency of terms.
lowercase : boolean, default True
Convert all characters to lowercase before tokenizing.
token_pattern : string
Regular expression denoting what constitutes a "token", only used
if ``analyzer == 'word'``. The default regexp selects tokens of 2
or more alphanumeric characters (punctuation is completely ignored
and always treated as a token separator).
max_df : float in range [0.0, 1.0] or int, default=1.0
When building the vocabulary ignore terms that have a document
frequency strictly higher than the given threshold (corpus-specific
stop words).
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
min_df : float in range [0.0, 1.0] or int, default=1
When building the vocabulary ignore terms that have a document
frequency strictly lower than the given threshold. This value is also
called cut-off in the literature.
If float, the parameter represents a proportion of documents, integer
absolute counts.
This parameter is ignored if vocabulary is not None.
max_features : int or None, default=None
If not None, build a vocabulary that only consider the top
max_features ordered by term frequency across the corpus.
This parameter is ignored if vocabulary is not None.
vocabulary : Mapping or iterable, optional
Either a Mapping (e.g., a dict) where keys are terms and values are
indices in the feature matrix, or an iterable over terms. If not
given, a vocabulary is determined from the input documents.
binary : boolean, default=False
If True, all non-zero term counts are set to 1. This does not mean
outputs will have only 0/1 values, only that the tf term in tf-idf
is binary. (Set idf and normalization to False to get 0/1 outputs.)
dtype : type, optional
Type of the matrix returned by fit_transform() or transform().
norm : 'l1', 'l2' or None, optional
Norm used to normalize term vectors. None for no normalization.
use_idf : boolean, default=True
Enable inverse-document-frequency reweighting.
smooth_idf : boolean, default=True
Smooth idf weights by adding one to document frequencies, as if an
extra document was seen containing every term in the collection
exactly once. Prevents zero divisions.
sublinear_tf : boolean, default=False
Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf).
Attributes
----------
idf_ : array, shape = [n_features], or None
The learned idf vector (global term weights)
when ``use_idf`` is set to True, None otherwise.
stop_words_ : set
Terms that were ignored because they either:
- occurred in too many documents (`max_df`)
- occurred in too few documents (`min_df`)
- were cut off by feature selection (`max_features`).
This is only available if no vocabulary was given.
See also
--------
CountVectorizer
Tokenize the documents and count the occurrences of token and return
them as a sparse matrix
TfidfTransformer
Apply Term Frequency Inverse Document Frequency normalization to a
sparse matrix of occurrence counts.
Notes
-----
The ``stop_words_`` attribute can get large and increase the model size
when pickling. This attribute is provided only for introspection and can
be safely removed using delattr or set to None before pickling.
"""
def __init__(self, input='content', encoding='utf-8',
decode_error='strict', strip_accents=None, lowercase=True,
preprocessor=None, tokenizer=None, analyzer='word',
stop_words=None, token_pattern=r"(?u)\b\w\w+\b",
ngram_range=(1, 1), max_df=1.0, min_df=1,
max_features=None, vocabulary=None, binary=False,
dtype=np.int64, norm='l2', use_idf=True, smooth_idf=True,
sublinear_tf=False):
super(TfidfVectorizer, self).__init__(
input=input, encoding=encoding, decode_error=decode_error,
strip_accents=strip_accents, lowercase=lowercase,
preprocessor=preprocessor, tokenizer=tokenizer, analyzer=analyzer,
stop_words=stop_words, token_pattern=token_pattern,
ngram_range=ngram_range, max_df=max_df, min_df=min_df,
max_features=max_features, vocabulary=vocabulary, binary=binary,
dtype=dtype)
self._tfidf = TfidfTransformer(norm=norm, use_idf=use_idf,
smooth_idf=smooth_idf,
sublinear_tf=sublinear_tf)
# Broadcast the TF-IDF parameters to the underlying transformer instance
# for easy grid search and repr
@property
def norm(self):
return self._tfidf.norm
@norm.setter
def norm(self, value):
self._tfidf.norm = value
@property
def use_idf(self):
return self._tfidf.use_idf
@use_idf.setter
def use_idf(self, value):
self._tfidf.use_idf = value
@property
def smooth_idf(self):
return self._tfidf.smooth_idf
@smooth_idf.setter
def smooth_idf(self, value):
self._tfidf.smooth_idf = value
@property
def sublinear_tf(self):
return self._tfidf.sublinear_tf
@sublinear_tf.setter
def sublinear_tf(self, value):
self._tfidf.sublinear_tf = value
@property
def idf_(self):
return self._tfidf.idf_
def fit(self, raw_documents, y=None):
"""Learn vocabulary and idf from training set.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
self : TfidfVectorizer
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
return self
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
X = super(TfidfVectorizer, self).fit_transform(raw_documents)
self._tfidf.fit(X)
# X is already a transformed view of raw_documents so
# we set copy to False
return self._tfidf.transform(X, copy=False)
def transform(self, raw_documents, copy=True):
"""Transform documents to document-term matrix.
Uses the vocabulary and document frequencies (df) learned by fit (or
fit_transform).
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
copy : boolean, default True
Whether to copy X and operate on the copy or perform in-place
operations.
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
check_is_fitted(self, '_tfidf', 'The tfidf vector is not fitted')
X = super(TfidfVectorizer, self).transform(raw_documents)
return self._tfidf.transform(X, copy=False)
| bsd-3-clause |
2014c2g12/c2g12 | wsgi/exts/w2/static/Brython2.0.0-20140209-164925/Lib/unittest/test/testmock/testpatch.py | 739 | 53126 | # Copyright (C) 2007-2012 Michael Foord & the mock team
# E-mail: fuzzyman AT voidspace DOT org DOT uk
# http://www.voidspace.org.uk/python/mock/
import os
import sys
import unittest
from unittest.test.testmock import support
from unittest.test.testmock.support import SomeClass, is_instance
from unittest.mock import (
    NonCallableMock, CallableMixin, patch, sentinel,
    MagicMock, Mock, NonCallableMagicMock, _patch,
    DEFAULT, call, _get_target
)
builtin_string = 'builtins'
PTModule = sys.modules[__name__]
MODNAME = '%s.PTModule' % __name__
def _get_proxy(obj, get_only=True):
class Proxy(object):
def __getattr__(self, name):
return getattr(obj, name)
if not get_only:
def __setattr__(self, name, value):
setattr(obj, name, value)
def __delattr__(self, name):
delattr(obj, name)
Proxy.__setattr__ = __setattr__
Proxy.__delattr__ = __delattr__
return Proxy()
# for use in the test
something = sentinel.Something
something_else = sentinel.SomethingElse
class Foo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
foo_name = '%s.Foo' % __name__
def function(a, b=Foo):
pass
class Container(object):
def __init__(self):
self.values = {}
def __getitem__(self, name):
return self.values[name]
def __setitem__(self, name, value):
self.values[name] = value
def __delitem__(self, name):
del self.values[name]
def __iter__(self):
return iter(self.values)
class PatchTest(unittest.TestCase):
def assertNotCallable(self, obj, magic=True):
MockClass = NonCallableMagicMock
if not magic:
MockClass = NonCallableMock
self.assertRaises(TypeError, obj)
self.assertTrue(is_instance(obj, MockClass))
self.assertFalse(is_instance(obj, CallableMixin))
def test_single_patchobject(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patchobject_with_none(self):
class Something(object):
attribute = sentinel.Original
@patch.object(Something, 'attribute', None)
def test():
self.assertIsNone(Something.attribute, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_multiple_patchobject(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'next_attribute', sentinel.Patched2)
def test():
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
self.assertEqual(Something.next_attribute, sentinel.Patched2,
"unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(Something.next_attribute, sentinel.Original2,
"patch not restored")
def test_object_lookup_is_quite_lazy(self):
global something
original = something
@patch('%s.something' % __name__, sentinel.Something2)
def test():
pass
try:
something = sentinel.replacement_value
test()
self.assertEqual(something, sentinel.replacement_value)
finally:
something = original
def test_patch(self):
@patch('%s.something' % __name__, sentinel.Something2)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
@patch('%s.something' % __name__, sentinel.Something2)
@patch('%s.something_else' % __name__, sentinel.SomethingElse)
def test():
self.assertEqual(PTModule.something, sentinel.Something2,
"unpatched")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"unpatched")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
# Test the patching and restoring works a second time
test()
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
self.assertEqual(PTModule.something_else, sentinel.SomethingElse,
"patch not restored")
mock = Mock()
mock.return_value = sentinel.Handle
@patch('%s.open' % builtin_string, mock)
def test():
self.assertEqual(open('filename', 'r'), sentinel.Handle,
"open not patched")
test()
test()
self.assertNotEqual(open, mock, "patch not restored")
def test_patch_class_attribute(self):
@patch('%s.SomeClass.class_attribute' % __name__,
sentinel.ClassAttribute)
def test():
self.assertEqual(PTModule.SomeClass.class_attribute,
sentinel.ClassAttribute, "unpatched")
test()
self.assertIsNone(PTModule.SomeClass.class_attribute,
"patch not restored")
def test_patchobject_with_default_mock(self):
class Test(object):
something = sentinel.Original
something2 = sentinel.Original2
@patch.object(Test, 'something')
def test(mock):
self.assertEqual(mock, Test.something,
"Mock not passed into test function")
self.assertIsInstance(mock, MagicMock,
"patch with two arguments did not create a mock")
test()
@patch.object(Test, 'something')
@patch.object(Test, 'something2')
def test(this1, this2, mock1, mock2):
self.assertEqual(this1, sentinel.this1,
"Patched function didn't receive initial argument")
self.assertEqual(this2, sentinel.this2,
"Patched function didn't receive second argument")
self.assertEqual(mock1, Test.something2,
"Mock not passed into test function")
self.assertEqual(mock2, Test.something,
"Second Mock not passed into test function")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
self.assertIsInstance(mock2, MagicMock,
"patch with two arguments did not create a mock")
# A hack to test that new mocks are passed the second time
            self.assertNotEqual(outerMock1, mock1, "unexpected value for mock1")
            self.assertNotEqual(outerMock2, mock2, "unexpected value for mock2")
return mock1, mock2
outerMock1 = outerMock2 = None
outerMock1, outerMock2 = test(sentinel.this1, sentinel.this2)
# Test that executing a second time creates new mocks
test(sentinel.this1, sentinel.this2)
def test_patch_with_spec(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec(self):
@patch.object(SomeClass, 'class_attribute', spec=SomeClass)
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_patch_with_spec_as_list(self):
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patchobject_with_spec_as_list(self):
@patch.object(SomeClass, 'class_attribute', spec=['wibble'])
def test(MockAttribute):
self.assertEqual(SomeClass.class_attribute, MockAttribute)
self.assertTrue(is_instance(SomeClass.class_attribute.wibble,
MagicMock))
self.assertRaises(AttributeError,
lambda: SomeClass.class_attribute.not_wibble)
test()
def test_nested_patch_with_spec_as_list(self):
# regression test for nested decorators
@patch('%s.open' % builtin_string)
@patch('%s.SomeClass' % __name__, spec=['wibble'])
def test(MockSomeClass, MockOpen):
self.assertEqual(SomeClass, MockSomeClass)
self.assertTrue(is_instance(SomeClass.wibble, MagicMock))
self.assertRaises(AttributeError, lambda: SomeClass.not_wibble)
test()
def test_patch_with_spec_as_boolean(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_object_with_spec_as_boolean(self):
@patch.object(PTModule, 'SomeClass', spec=True)
def test(MockSomeClass):
self.assertEqual(SomeClass, MockSomeClass)
# Should not raise attribute error
MockSomeClass.wibble
self.assertRaises(AttributeError, lambda: MockSomeClass.not_wibble)
test()
def test_patch_class_acts_with_spec_is_inherited(self):
@patch('%s.SomeClass' % __name__, spec=True)
def test(MockSomeClass):
self.assertTrue(is_instance(MockSomeClass, MagicMock))
instance = MockSomeClass()
self.assertNotCallable(instance)
# Should not raise attribute error
instance.wibble
self.assertRaises(AttributeError, lambda: instance.not_wibble)
test()
def test_patch_with_create_mocks_non_existent_attributes(self):
@patch('%s.frooble' % builtin_string, sentinel.Frooble, create=True)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_with_create_mocks_non_existent_attributes(self):
@patch.object(SomeClass, 'frooble', sentinel.Frooble, create=True)
def test():
self.assertEqual(SomeClass.frooble, sentinel.Frooble)
test()
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_wont_create_by_default(self):
try:
@patch('%s.frooble' % builtin_string, sentinel.Frooble)
def test():
self.assertEqual(frooble, sentinel.Frooble)
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertRaises(NameError, lambda: frooble)
def test_patchobject_wont_create_by_default(self):
try:
@patch.object(SomeClass, 'frooble', sentinel.Frooble)
def test():
self.fail('Patching non existent attributes should fail')
test()
except AttributeError:
pass
else:
self.fail('Patching non existent attributes should fail')
self.assertFalse(hasattr(SomeClass, 'frooble'))
def test_patch_with_static_methods(self):
class Foo(object):
@staticmethod
def woot():
return sentinel.Static
@patch.object(Foo, 'woot', staticmethod(lambda: sentinel.Patched))
def anonymous():
self.assertEqual(Foo.woot(), sentinel.Patched)
anonymous()
self.assertEqual(Foo.woot(), sentinel.Static)
def test_patch_local(self):
foo = sentinel.Foo
@patch.object(sentinel, 'Foo', 'Foo')
def anonymous():
self.assertEqual(sentinel.Foo, 'Foo')
anonymous()
self.assertEqual(sentinel.Foo, foo)
def test_patch_slots(self):
class Foo(object):
__slots__ = ('Foo',)
foo = Foo()
foo.Foo = sentinel.Foo
@patch.object(foo, 'Foo', 'Foo')
def anonymous():
self.assertEqual(foo.Foo, 'Foo')
anonymous()
self.assertEqual(foo.Foo, sentinel.Foo)
def test_patchobject_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Patched,
"unpatched")
def not_test_method(other_self):
self.assertEqual(Something.attribute, sentinel.Original,
"non-test method patched")
Foo = patch.object(Something, 'attribute', sentinel.Patched)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_class_decorator(self):
class Something(object):
attribute = sentinel.Original
class Foo(object):
def test_method(other_self, mock_something):
self.assertEqual(PTModule.something, mock_something,
"unpatched")
def not_test_method(other_self):
self.assertEqual(PTModule.something, sentinel.Something,
"non-test method patched")
Foo = patch('%s.something' % __name__)(Foo)
f = Foo()
f.test_method()
f.not_test_method()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
self.assertEqual(PTModule.something, sentinel.Something,
"patch not restored")
def test_patchobject_twice(self):
class Something(object):
attribute = sentinel.Original
next_attribute = sentinel.Original2
@patch.object(Something, 'attribute', sentinel.Patched)
@patch.object(Something, 'attribute', sentinel.Patched)
def test():
self.assertEqual(Something.attribute, sentinel.Patched, "unpatched")
test()
self.assertEqual(Something.attribute, sentinel.Original,
"patch not restored")
def test_patch_dict(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')])
def test():
self.assertEqual(len(foo), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo)
def test():
foo['a'] = 3
del foo['initial']
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'})
def test():
self.assertEqual(len(foo.values), 3)
self.assertEqual(foo['a'], 'b')
test()
self.assertEqual(foo.values, original)
def test_patch_dict_with_clear(self):
foo = {'initial': object(), 'other': 'something'}
original = foo.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
@patch.dict(foo, [('a', 'b')], clear=True)
def test():
self.assertEqual(foo, {'a': 'b'})
test()
self.assertEqual(foo, original)
def test_patch_dict_with_container_object_and_clear(self):
foo = Container()
foo['initial'] = object()
foo['other'] = 'something'
original = foo.values.copy()
@patch.dict(foo, clear=True)
def test():
self.assertEqual(foo.values, {})
foo['a'] = 3
foo['other'] = 'something else'
test()
self.assertEqual(foo.values, original)
@patch.dict(foo, {'a': 'b'}, clear=True)
def test():
self.assertEqual(foo.values, {'a': 'b'})
test()
self.assertEqual(foo.values, original)
def test_name_preserved(self):
foo = {}
@patch('%s.SomeClass' % __name__, object())
@patch('%s.SomeClass' % __name__, object(), autospec=True)
@patch.object(SomeClass, object())
@patch.dict(foo)
def some_name():
pass
self.assertEqual(some_name.__name__, 'some_name')
def test_patch_with_exception(self):
foo = {}
@patch.dict(foo, {'a': 'b'})
def test():
raise NameError('Konrad')
try:
test()
except NameError:
pass
else:
self.fail('NameError not raised by test')
self.assertEqual(foo, {})
def test_patch_dict_with_string(self):
@patch.dict('os.environ', {'konrad_delong': 'some value'})
def test():
self.assertIn('konrad_delong', os.environ)
test()
def test_patch_descriptor(self):
# would be some effort to fix this - we could special case the
# builtin descriptors: classmethod, property, staticmethod
return
class Nothing(object):
foo = None
class Something(object):
foo = {}
@patch.object(Nothing, 'foo', 2)
@classmethod
def klass(cls):
self.assertIs(cls, Something)
@patch.object(Nothing, 'foo', 2)
@staticmethod
def static(arg):
return arg
@patch.dict(foo)
@classmethod
def klass_dict(cls):
self.assertIs(cls, Something)
@patch.dict(foo)
@staticmethod
def static_dict(arg):
return arg
# these will raise exceptions if patching descriptors is broken
self.assertEqual(Something.static('f00'), 'f00')
Something.klass()
self.assertEqual(Something.static_dict('f00'), 'f00')
Something.klass_dict()
something = Something()
self.assertEqual(something.static('f00'), 'f00')
something.klass()
self.assertEqual(something.static_dict('f00'), 'f00')
something.klass_dict()
def test_patch_spec_set(self):
@patch('%s.SomeClass' % __name__, spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec=SomeClass, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
@patch.object(support, 'SomeClass', spec_set=True)
def test(MockClass):
MockClass.z = 'foo'
self.assertRaises(AttributeError, test)
def test_spec_set_inherit(self):
@patch('%s.SomeClass' % __name__, spec_set=True)
def test(MockClass):
instance = MockClass()
instance.z = 'foo'
self.assertRaises(AttributeError, test)
def test_patch_start_stop(self):
original = something
patcher = patch('%s.something' % __name__)
self.assertIs(something, original)
mock = patcher.start()
try:
self.assertIsNot(mock, original)
self.assertIs(something, mock)
finally:
patcher.stop()
self.assertIs(something, original)
def test_stop_without_start(self):
patcher = patch(foo_name, 'bar', 3)
# calling stop without start used to produce a very obscure error
self.assertRaises(RuntimeError, patcher.stop)
def test_patchobject_start_stop(self):
original = something
patcher = patch.object(PTModule, 'something', 'foo')
self.assertIs(something, original)
replaced = patcher.start()
try:
self.assertEqual(replaced, 'foo')
self.assertIs(something, replaced)
finally:
patcher.stop()
self.assertIs(something, original)
def test_patch_dict_start_stop(self):
d = {'foo': 'bar'}
original = d.copy()
patcher = patch.dict(d, [('spam', 'eggs')], clear=True)
self.assertEqual(d, original)
patcher.start()
try:
self.assertEqual(d, {'spam': 'eggs'})
finally:
patcher.stop()
self.assertEqual(d, original)
def test_patch_dict_class_decorator(self):
this = self
d = {'spam': 'eggs'}
original = d.copy()
class Test(object):
def test_first(self):
this.assertEqual(d, {'foo': 'bar'})
def test_second(self):
this.assertEqual(d, {'foo': 'bar'})
Test = patch.dict(d, {'foo': 'bar'}, clear=True)(Test)
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
test = Test()
test.test_first()
self.assertEqual(d, original)
test.test_second()
self.assertEqual(d, original)
def test_get_only_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(thing)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_get_set_delete_proxy(self):
class Something(object):
foo = 'foo'
class SomethingElse:
foo = 'foo'
for thing in Something, SomethingElse, Something(), SomethingElse:
proxy = _get_proxy(Something, get_only=False)
@patch.object(proxy, 'foo', 'bar')
def test():
self.assertEqual(proxy.foo, 'bar')
test()
self.assertEqual(proxy.foo, 'foo')
self.assertEqual(thing.foo, 'foo')
self.assertNotIn('foo', proxy.__dict__)
def test_patch_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch(foo_name, **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_object_keyword_args(self):
kwargs = {'side_effect': KeyError, 'foo.bar.return_value': 33,
'foo': MagicMock()}
patcher = patch.object(Foo, 'f', **kwargs)
mock = patcher.start()
patcher.stop()
self.assertRaises(KeyError, mock)
self.assertEqual(mock.foo.bar(), 33)
self.assertIsInstance(mock.foo, MagicMock)
def test_patch_dict_keyword_args(self):
original = {'foo': 'bar'}
copy = original.copy()
patcher = patch.dict(original, foo=3, bar=4, baz=5)
patcher.start()
try:
self.assertEqual(original, dict(foo=3, bar=4, baz=5))
finally:
patcher.stop()
self.assertEqual(original, copy)
def test_autospec(self):
class Boo(object):
def __init__(self, a):
pass
def f(self, a):
pass
def g(self):
pass
foo = 'bar'
class Bar(object):
def a(self):
pass
def _test(mock):
mock(1)
mock.assert_called_with(1)
self.assertRaises(TypeError, mock)
def _test2(mock):
mock.f(1)
mock.f.assert_called_with(1)
self.assertRaises(TypeError, mock.f)
mock.g()
mock.g.assert_called_with()
self.assertRaises(TypeError, mock.g, 1)
self.assertRaises(AttributeError, getattr, mock, 'h')
mock.foo.lower()
mock.foo.lower.assert_called_with()
self.assertRaises(AttributeError, getattr, mock.foo, 'bar')
mock.Bar()
mock.Bar.assert_called_with()
mock.Bar.a()
mock.Bar.a.assert_called_with()
self.assertRaises(TypeError, mock.Bar.a, 1)
mock.Bar().a()
mock.Bar().a.assert_called_with()
self.assertRaises(TypeError, mock.Bar().a, 1)
self.assertRaises(AttributeError, getattr, mock.Bar, 'b')
self.assertRaises(AttributeError, getattr, mock.Bar(), 'b')
def function(mock):
_test(mock)
_test2(mock)
_test2(mock(1))
self.assertIs(mock, Foo)
return mock
test = patch(foo_name, autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
module = sys.modules[__name__]
test = patch.object(module, 'Foo', autospec=True)(function)
mock = test()
self.assertIsNot(Foo, mock)
# test patching a second time works
test()
def test_autospec_function(self):
@patch('%s.function' % __name__, autospec=True)
def test(mock):
function(1)
function.assert_called_with(1)
function(2, 3)
function.assert_called_with(2, 3)
self.assertRaises(TypeError, function)
self.assertRaises(AttributeError, getattr, function, 'foo')
test()
def test_autospec_keywords(self):
@patch('%s.function' % __name__, autospec=True,
return_value=3)
def test(mock_function):
#self.assertEqual(function.abc, 'foo')
return function(1, 2)
result = test()
self.assertEqual(result, 3)
def test_autospec_with_new(self):
patcher = patch('%s.function' % __name__, new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
module = sys.modules[__name__]
patcher = patch.object(module, 'function', new=3, autospec=True)
self.assertRaises(TypeError, patcher.start)
def test_autospec_with_object(self):
class Bar(Foo):
extra = []
patcher = patch(foo_name, autospec=Bar)
mock = patcher.start()
try:
self.assertIsInstance(mock, Bar)
self.assertIsInstance(mock.extra, list)
finally:
patcher.stop()
def test_autospec_inherits(self):
FooClass = Foo
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIsInstance(mock, FooClass)
self.assertIsInstance(mock(3), FooClass)
finally:
patcher.stop()
def test_autospec_name(self):
patcher = patch(foo_name, autospec=True)
mock = patcher.start()
try:
self.assertIn(" name='Foo'", repr(mock))
self.assertIn(" name='Foo.f'", repr(mock.f))
self.assertIn(" name='Foo()'", repr(mock(None)))
self.assertIn(" name='Foo().f'", repr(mock(None).f))
finally:
patcher.stop()
def test_tracebacks(self):
@patch.object(Foo, 'f', object())
def test():
raise AssertionError
try:
test()
except:
err = sys.exc_info()
result = unittest.TextTestResult(None, None, 0)
traceback = result._exc_info_to_string(err, self)
self.assertIn('raise AssertionError', traceback)
def test_new_callable_patch(self):
patcher = patch(foo_name, new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_patch_object(self):
patcher = patch.object(Foo, 'f', new_callable=NonCallableMagicMock)
m1 = patcher.start()
patcher.stop()
m2 = patcher.start()
patcher.stop()
self.assertIsNot(m1, m2)
for mock in m1, m2:
self.assertNotCallable(m1)
def test_new_callable_keyword_arguments(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, arg1=1, arg2=2)
m = patcher.start()
try:
self.assertIs(type(m), Bar)
self.assertEqual(Bar.kwargs, dict(arg1=1, arg2=2))
finally:
patcher.stop()
def test_new_callable_spec(self):
class Bar(object):
kwargs = None
def __init__(self, **kwargs):
Bar.kwargs = kwargs
patcher = patch(foo_name, new_callable=Bar, spec=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec=Bar))
finally:
patcher.stop()
patcher = patch(foo_name, new_callable=Bar, spec_set=Bar)
patcher.start()
try:
self.assertEqual(Bar.kwargs, dict(spec_set=Bar))
finally:
patcher.stop()
def test_new_callable_create(self):
non_existent_attr = '%s.weeeee' % foo_name
p = patch(non_existent_attr, new_callable=NonCallableMock)
self.assertRaises(AttributeError, p.start)
p = patch(non_existent_attr, new_callable=NonCallableMock,
create=True)
m = p.start()
try:
self.assertNotCallable(m, magic=False)
finally:
p.stop()
def test_new_callable_incompatible_with_new(self):
self.assertRaises(
ValueError, patch, foo_name, new=object(), new_callable=MagicMock
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new=object(),
new_callable=MagicMock
)
def test_new_callable_incompatible_with_autospec(self):
self.assertRaises(
ValueError, patch, foo_name, new_callable=MagicMock,
autospec=True
)
self.assertRaises(
ValueError, patch.object, Foo, 'f', new_callable=MagicMock,
autospec=True
)
def test_new_callable_inherit_for_mocks(self):
class MockSub(Mock):
pass
MockClasses = (
NonCallableMock, NonCallableMagicMock, MagicMock, Mock, MockSub
)
for Klass in MockClasses:
for arg in 'spec', 'spec_set':
kwargs = {arg: True}
p = patch(foo_name, new_callable=Klass, **kwargs)
m = p.start()
try:
instance = m.return_value
self.assertRaises(AttributeError, getattr, instance, 'x')
finally:
p.stop()
def test_new_callable_inherit_non_mock(self):
class NotAMock(object):
def __init__(self, spec):
self.spec = spec
p = patch(foo_name, new_callable=NotAMock, spec=True)
m = p.start()
try:
self.assertTrue(is_instance(m, NotAMock))
self.assertRaises(AttributeError, getattr, m, 'return_value')
finally:
p.stop()
self.assertEqual(m.spec, Foo)
def test_new_callable_class_decorating(self):
test = self
original = Foo
class SomeTest(object):
def _test(self, mock_foo):
test.assertIsNot(Foo, original)
test.assertIs(Foo, mock_foo)
test.assertIsInstance(Foo, SomeClass)
def test_two(self, mock_foo):
self._test(mock_foo)
def test_one(self, mock_foo):
self._test(mock_foo)
SomeTest = patch(foo_name, new_callable=SomeClass)(SomeTest)
SomeTest().test_one()
SomeTest().test_two()
self.assertIs(Foo, original)
def test_patch_multiple(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher1 = patch.multiple(foo_name, f=1, g=2)
patcher2 = patch.multiple(Foo, f=1, g=2)
for patcher in patcher1, patcher2:
patcher.start()
try:
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 1)
self.assertEqual(Foo.g, 2)
finally:
patcher.stop()
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
@patch.multiple(foo_name, f=3, g=4)
def test():
self.assertIs(Foo, original_foo)
self.assertEqual(Foo.f, 3)
self.assertEqual(Foo.g, 4)
test()
def test_patch_multiple_no_kwargs(self):
self.assertRaises(ValueError, patch.multiple, foo_name)
self.assertRaises(ValueError, patch.multiple, Foo)
def test_patch_multiple_create_mocks(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
def test(f, foo):
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertEqual(Foo.g, 3)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_different_order(self):
# bug revealed by Jython!
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 3)
patcher.attribute_name = 'f'
other = patch.object(Foo, 'g', DEFAULT)
other.attribute_name = 'g'
patcher.additional_patchers = [other]
@patcher
def test(g):
self.assertIs(Foo.g, g)
self.assertEqual(Foo.f, 3)
test()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_stacked_decorators(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
@patch(foo_name + '.g')
def test1(g, **kwargs):
_test(g, **kwargs)
@patch.multiple(foo_name, f=DEFAULT)
@patch(foo_name + '.g')
@patch.multiple(foo_name, foo=DEFAULT)
def test2(g, **kwargs):
_test(g, **kwargs)
@patch(foo_name + '.g')
@patch.multiple(foo_name, f=DEFAULT)
@patch.multiple(foo_name, foo=DEFAULT)
def test3(g, **kwargs):
_test(g, **kwargs)
def _test(g, **kwargs):
f = kwargs.pop('f')
foo = kwargs.pop('foo')
self.assertFalse(kwargs)
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.g, g)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(g, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
test1()
test2()
test3()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create_mocks_patcher(self):
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
patcher = patch.multiple(foo_name, f=DEFAULT, g=3, foo=DEFAULT)
result = patcher.start()
try:
f = result['f']
foo = result['foo']
self.assertEqual(set(result), set(['f', 'foo']))
self.assertIs(Foo, original_foo)
self.assertIs(Foo.f, f)
self.assertIs(Foo.foo, foo)
self.assertTrue(is_instance(f, MagicMock))
self.assertTrue(is_instance(foo, MagicMock))
finally:
patcher.stop()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_decorating_class(self):
test = self
original_foo = Foo
original_f = Foo.f
original_g = Foo.g
class SomeTest(object):
def _test(self, f, foo):
test.assertIs(Foo, original_foo)
test.assertIs(Foo.f, f)
test.assertEqual(Foo.g, 3)
test.assertIs(Foo.foo, foo)
test.assertTrue(is_instance(f, MagicMock))
test.assertTrue(is_instance(foo, MagicMock))
def test_two(self, f, foo):
self._test(f, foo)
def test_one(self, f, foo):
self._test(f, foo)
SomeTest = patch.multiple(
foo_name, f=DEFAULT, g=3, foo=DEFAULT
)(SomeTest)
thing = SomeTest()
thing.test_one()
thing.test_two()
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_create(self):
patcher = patch.multiple(Foo, blam='blam')
self.assertRaises(AttributeError, patcher.start)
patcher = patch.multiple(Foo, blam='blam', create=True)
patcher.start()
try:
self.assertEqual(Foo.blam, 'blam')
finally:
patcher.stop()
self.assertFalse(hasattr(Foo, 'blam'))
def test_patch_multiple_spec_set(self):
# if spec_set works then we can assume that spec and autospec also
# work as the underlying machinery is the same
patcher = patch.multiple(Foo, foo=DEFAULT, spec_set=['a', 'b'])
result = patcher.start()
try:
self.assertEqual(Foo.foo, result['foo'])
Foo.foo.a(1)
Foo.foo.b(2)
Foo.foo.a.assert_called_with(1)
Foo.foo.b.assert_called_with(2)
self.assertRaises(AttributeError, setattr, Foo.foo, 'c', None)
finally:
patcher.stop()
def test_patch_multiple_new_callable(self):
class Thing(object):
pass
patcher = patch.multiple(
Foo, f=DEFAULT, g=DEFAULT, new_callable=Thing
)
result = patcher.start()
try:
self.assertIs(Foo.f, result['f'])
self.assertIs(Foo.g, result['g'])
self.assertIsInstance(Foo.f, Thing)
self.assertIsInstance(Foo.g, Thing)
self.assertIsNot(Foo.f, Foo.g)
finally:
patcher.stop()
def test_nested_patch_failure(self):
original_f = Foo.f
original_g = Foo.g
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'missing', 1)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'missing', 1)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'f', 1)
def thing1():
pass
@patch.object(Foo, 'foo', new_callable=crasher)
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
def thing2():
pass
@patch.object(Foo, 'g', 1)
@patch.object(Foo, 'f', 1)
@patch.object(Foo, 'foo', new_callable=crasher)
def thing3():
pass
for func in thing1, thing2, thing3:
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_failure(self):
original_f = Foo.f
original_g = Foo.g
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'missing', 1)
bad.attribute_name = 'missing'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(AttributeError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
def test_patch_multiple_new_callable_failure(self):
original_f = Foo.f
original_g = Foo.g
original_foo = Foo.foo
def crasher():
raise NameError('crasher')
patcher = patch.object(Foo, 'f', 1)
patcher.attribute_name = 'f'
good = patch.object(Foo, 'g', 1)
good.attribute_name = 'g'
bad = patch.object(Foo, 'foo', new_callable=crasher)
bad.attribute_name = 'foo'
for additionals in [good, bad], [bad, good]:
patcher.additional_patchers = additionals
@patcher
def func():
pass
self.assertRaises(NameError, func)
self.assertEqual(Foo.f, original_f)
self.assertEqual(Foo.g, original_g)
self.assertEqual(Foo.foo, original_foo)
def test_patch_multiple_string_subclasses(self):
Foo = type('Foo', (str,), {'fish': 'tasty'})
foo = Foo()
@patch.multiple(foo, fish='nearly gone')
def test():
self.assertEqual(foo.fish, 'nearly gone')
test()
self.assertEqual(foo.fish, 'tasty')
@patch('unittest.mock.patch.TEST_PREFIX', 'foo')
def test_patch_test_prefix(self):
class Foo(object):
thing = 'original'
def foo_one(self):
return self.thing
def foo_two(self):
return self.thing
def test_one(self):
return self.thing
def test_two(self):
return self.thing
Foo = patch.object(Foo, 'thing', 'changed')(Foo)
foo = Foo()
self.assertEqual(foo.foo_one(), 'changed')
self.assertEqual(foo.foo_two(), 'changed')
self.assertEqual(foo.test_one(), 'original')
self.assertEqual(foo.test_two(), 'original')
@patch('unittest.mock.patch.TEST_PREFIX', 'bar')
def test_patch_dict_test_prefix(self):
class Foo(object):
def bar_one(self):
return dict(the_dict)
def bar_two(self):
return dict(the_dict)
def test_one(self):
return dict(the_dict)
def test_two(self):
return dict(the_dict)
the_dict = {'key': 'original'}
Foo = patch.dict(the_dict, key='changed')(Foo)
        foo = Foo()
self.assertEqual(foo.bar_one(), {'key': 'changed'})
self.assertEqual(foo.bar_two(), {'key': 'changed'})
self.assertEqual(foo.test_one(), {'key': 'original'})
self.assertEqual(foo.test_two(), {'key': 'original'})
def test_patch_with_spec_mock_repr(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
self.assertIn(" name='SomeClass'", repr(m))
self.assertIn(" name='SomeClass.class_attribute'",
repr(m.class_attribute))
self.assertIn(" name='SomeClass()'", repr(m()))
self.assertIn(" name='SomeClass().class_attribute'",
repr(m().class_attribute))
finally:
p.stop()
def test_patch_nested_autospec_repr(self):
with patch('unittest.test.testmock.support', autospec=True) as m:
self.assertIn(" name='support.SomeClass.wibble()'",
repr(m.SomeClass.wibble()))
self.assertIn(" name='support.SomeClass().wibble()'",
repr(m.SomeClass().wibble()))
def test_mock_calls_with_patch(self):
for arg in ('spec', 'autospec', 'spec_set'):
p = patch('%s.SomeClass' % __name__, **{arg: True})
m = p.start()
try:
m.wibble()
kalls = [call.wibble()]
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(m.method_calls, kalls)
self.assertEqual(m.wibble.mock_calls, [call()])
result = m()
kalls.append(call())
self.assertEqual(m.mock_calls, kalls)
result.wibble()
kalls.append(call().wibble())
self.assertEqual(m.mock_calls, kalls)
self.assertEqual(result.mock_calls, [call.wibble()])
self.assertEqual(result.wibble.mock_calls, [call()])
self.assertEqual(result.method_calls, [call.wibble()])
finally:
p.stop()
def test_patch_imports_lazily(self):
sys.modules.pop('squizz', None)
p1 = patch('squizz.squozz')
self.assertRaises(ImportError, p1.start)
squizz = Mock()
squizz.squozz = 6
sys.modules['squizz'] = squizz
p1 = patch('squizz.squozz')
squizz.squozz = 3
p1.start()
p1.stop()
self.assertEqual(squizz.squozz, 3)
    def test_patch_propagates_exc_on_exit(self):
class holder:
exc_info = None, None, None
class custom_patch(_patch):
def __exit__(self, etype=None, val=None, tb=None):
_patch.__exit__(self, etype, val, tb)
holder.exc_info = etype, val, tb
stop = __exit__
def with_custom_patch(target):
getter, attribute = _get_target(target)
return custom_patch(
getter, attribute, DEFAULT, None, False, None,
None, None, {}
)
@with_custom_patch('squizz.squozz')
def test(mock):
raise RuntimeError
self.assertRaises(RuntimeError, test)
self.assertIs(holder.exc_info[0], RuntimeError)
        self.assertIsNotNone(holder.exc_info[1],
                             'exception value not propagated')
        self.assertIsNotNone(holder.exc_info[2],
                             'exception traceback not propagated')
def test_create_and_specs(self):
for kwarg in ('spec', 'spec_set', 'autospec'):
p = patch('%s.doesnotexist' % __name__, create=True,
**{kwarg: True})
self.assertRaises(TypeError, p.start)
self.assertRaises(NameError, lambda: doesnotexist)
# check that spec with create is innocuous if the original exists
p = patch(MODNAME, create=True, **{kwarg: True})
p.start()
p.stop()
def test_multiple_specs(self):
original = PTModule
for kwarg in ('spec', 'spec_set'):
p = patch(MODNAME, autospec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
for kwarg in ('spec_set', 'autospec'):
p = patch(MODNAME, spec=0, **{kwarg: 0})
self.assertRaises(TypeError, p.start)
self.assertIs(PTModule, original)
def test_specs_false_instead_of_none(self):
p = patch(MODNAME, spec=False, spec_set=False, autospec=False)
mock = p.start()
try:
# no spec should have been set, so attribute access should not fail
mock.does_not_exist
mock.does_not_exist = 3
finally:
p.stop()
def test_falsey_spec(self):
for kwarg in ('spec', 'autospec', 'spec_set'):
p = patch(MODNAME, **{kwarg: 0})
m = p.start()
try:
self.assertRaises(AttributeError, getattr, m, 'doesnotexit')
finally:
p.stop()
def test_spec_set_true(self):
for kwarg in ('spec', 'autospec'):
p = patch(MODNAME, spec_set=True, **{kwarg: True})
m = p.start()
try:
self.assertRaises(AttributeError, setattr, m,
'doesnotexist', 'something')
self.assertRaises(AttributeError, getattr, m, 'doesnotexist')
finally:
p.stop()
def test_callable_spec_as_list(self):
spec = ('__call__',)
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertTrue(callable(m))
finally:
p.stop()
def test_not_callable_spec_as_list(self):
spec = ('foo', 'bar')
p = patch(MODNAME, spec=spec)
m = p.start()
try:
self.assertFalse(callable(m))
finally:
p.stop()
def test_patch_stopall(self):
unlink = os.unlink
chdir = os.chdir
path = os.path
patch('os.unlink', something).start()
patch('os.chdir', something_else).start()
@patch('os.path')
def patched(mock_path):
patch.stopall()
self.assertIs(os.path, mock_path)
self.assertIs(os.unlink, unlink)
self.assertIs(os.chdir, chdir)
patched()
self.assertIs(os.path, path)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
philippmayrth/uptime | tests/uptime_tests.py | 2 | 5533 | #!/usr/bin/env python
# coding: utf8
import imp
import sys
import time
from datetime import datetime
import unittest
sys.path.insert(0, '.')
import src as uptime
boottime_helpers = [f for f in vars(uptime) if f.startswith('_boottime_')]
uptime_helpers = [f for f in vars(uptime) if f.startswith('_uptime_')]
class NormalTest(unittest.TestCase):
"""
This class just calls each of the functions normally and ensures they don't
do dumb things like throw exceptions or return complex numbers.
"""
def tearDown(self):
"""
__boottime affects how boottime() and its helpers work, and it may be
set as a side-effect by any function. To be on the safe side, just
reload the whole module every time.
"""
imp.reload(uptime)
def basic_test(self, func, rettypes):
"""
Calls a given function and checks if it returns something of a type
in the sequence rettypes.
"""
ret = func()
self.assertTrue(any(isinstance(ret, t) for t in rettypes))
def __getattr__(self, name):
# I really don't feel like writing and maintaining over a dozen
# essentially identical methods, and if there's a cleaner way to do
# this, I couldn't find it in the unittest docs.
if name.startswith('test_'):
func = name[5:]
if func == 'uptime' or func in uptime_helpers:
rettypes = (type(None), float, int)
elif func == 'boottime' or func in boottime_helpers:
rettypes = (type(None), datetime)
else:
raise AttributeError()
return lambda: self.basic_test(getattr(uptime, func), rettypes)
        else:
            # unittest.TestCase defines no __getattr__ of its own, so signal
            # the missing attribute directly.
            raise AttributeError(name)
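# A sketch of what the dynamic dispatch above provides (names derived from the
# helper lists at the top of this file): NormalTest('test_uptime') resolves
# through __getattr__ to basic_test(uptime.uptime, (NoneType, float, int)),
# and NormalTest('test_boottime') to basic_test(uptime.boottime,
# (NoneType, datetime)).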
class BrokenCtypesTest(NormalTest):
"""
It's ridiculous how many platforms don't have ctypes. This class simulates
that.
"""
@classmethod
def setUpClass(cls):
uptime.ctypes = None
delattr(uptime, 'struct')
delattr(uptime, 'os')
class OtherTest(unittest.TestCase):
def setUp(self):
imp.reload(uptime)
def test_equality_guarantee(self):
"""
If uptime.uptime and uptime.boottime are the only functions called,
it is guaranteed that the uptime subtracted from the current time is
the reported boot time, or that both are None.
"""
up = uptime.uptime()
if up is None:
self.assertTrue(uptime.boottime() is None)
else:
boot1 = time.localtime(time.time() - up)
boot2 = uptime.boottime()
self.assertTrue(boot1 == boot2)
def test_broken_datetime(self):
"""
datetime was introduced in Python 2.3, and though we officially only
support Python 2.5+ (because of ctypes), there are some platforms that
only have older versions available for which we can still provide
meaningful answers (Plan 9, mostly).
Importing uptime shouldn't immediately fail for them, but calling
boottime and its helpers should raise a RuntimeError.
"""
uptime.datetime = None
self.assertRaises(RuntimeError, uptime.boottime)
for h in boottime_helpers:
self.assertRaises(RuntimeError, getattr(uptime, h))
def run_suite(suite):
"""
unittest is basically a disaster, so let's do this ourselves.
"""
    sys.stdout.write('Running %d tests... \n' % suite.countTestCases())
res = unittest.TestResult()
suite.run(res)
if res.wasSuccessful():
sys.stdout.write('Finished without errors.\n')
return
sys.stdout.write('\n')
for problems, kind in ((res.errors, 'error'),
(res.failures, 'failure')):
if len(problems):
head = '%d %s%s' % (len(problems),
kind,
's' if len(problems) != 1 else '')
sys.stdout.write('\033[1;31m%s\n%s\033[0m\n' %
(head, '⎻' * len(head)))
for problem in problems:
func = problem[0]._testMethodName[5:]
environ = ' (broken ctypes)' if isinstance(problem[0],
BrokenCtypesTest) \
else ''
sys.stdout.write(
'• \033[1m%s%s\033[0m failed with message:\n\n%s\n\n' %
(func, environ, '\n'.join(map(lambda s: ' ' + s,
problem[1].splitlines())))
)
sys.stdout.write('%d tests completed successfully.\n' %
(res.testsRun - len(res.failures) - len(res.errors)))
if __name__ == '__main__':
tests = unittest.TestSuite()
# uptime tests
tests.addTests([NormalTest('test_uptime'),
BrokenCtypesTest('test_uptime')])
for helper in uptime_helpers:
tests.addTests([NormalTest('test_%s' % helper),
BrokenCtypesTest('test_%s' % helper)])
# boottime tests
tests.addTests([NormalTest('test_boottime'),
BrokenCtypesTest('test_boottime')])
for helper in boottime_helpers:
tests.addTests([NormalTest('test_%s' % helper),
BrokenCtypesTest('test_%s' % helper)])
# Other tests
tests.addTest(OtherTest('test_equality_guarantee'))
tests.addTest(OtherTest('test_broken_datetime'))
run_suite(tests)
| bsd-2-clause |
ABaldwinHunter/django-clone | django/contrib/auth/urls.py | 568 | 1036 | # The views used below are normally mapped in django.contrib.admin.urls.py
# This URLs file is used to provide a reliable view deployment for test purposes.
# It is also provided as a convenience to those who want to deploy these URLs
# elsewhere.
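# A typical way to deploy them (illustrative sketch; the 'accounts/' prefix is
# an assumption, not a requirement):
#
#     from django.conf.urls import include, url
#     urlpatterns = [
#         url(r'^accounts/', include('django.contrib.auth.urls')),
#     ]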
from django.conf.urls import url
from django.contrib.auth import views
urlpatterns = [
url(r'^login/$', views.login, name='login'),
url(r'^logout/$', views.logout, name='logout'),
url(r'^password_change/$', views.password_change, name='password_change'),
url(r'^password_change/done/$', views.password_change_done, name='password_change_done'),
url(r'^password_reset/$', views.password_reset, name='password_reset'),
url(r'^password_reset/done/$', views.password_reset_done, name='password_reset_done'),
url(r'^reset/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
views.password_reset_confirm, name='password_reset_confirm'),
url(r'^reset/done/$', views.password_reset_complete, name='password_reset_complete'),
]
| bsd-3-clause |
buitenzorg812/Koha-PTFS | koha-tmpl/intranet-tmpl/prog/en/lib/greybox/GreyBox_v5_5/combiner.py | 109 | 3808 | #!/usr/bin/env python
"""
Used to combine the different parts of GreyBox.
- Python 2.4 required
- Java 1.4+ required
- Dojo's JavaScript compressor (http://dojotoolkit.org/docs/compressor_system.html). Place it under compression_lib/custom_rhino.jar
"""
import os, sys, shutil
from compression_lib import AJS_minify
if __name__ == '__main__':
args = sys.argv
if len(args) < 2:
print """
Usage is:
python combiner.py [full|gallery|window]
Example usage:
python combiner.py full
The files will be store in greybox_dist/* depending on the dist. type
"""
sys.exit(0)
type = args[1]
output_dir = 'greybox'
##
# Config file list
#
js = []
css = []
static = []
append = lambda l, x: l.append('greybox_source/%s' % x)
def appendBase():
append(js, 'base/base.js')
append(js, 'auto_deco.js')
append(css, 'base/base.css')
append(static, 'base/indicator.gif')
append(static, 'base/loader_frame.html')
def appendSet():
append(js, 'set/set.js')
append(css, 'set/set.css')
append(static, 'set/next.gif')
append(static, 'set/prev.gif')
def appendGallery():
append(js, 'gallery/gallery.js')
append(css, 'gallery/gallery.css')
append(static, 'gallery/g_close.gif')
def appendWindow():
append(js, 'window/window.js')
append(css, 'window/window.css')
append(static, 'window/header_bg.gif')
append(static, 'window/w_close.gif')
appendBase()
if type == 'full':
appendGallery()
appendSet()
appendWindow()
elif type == 'gallery':
appendGallery()
appendSet()
elif type == 'window':
appendWindow()
else:
        sys.exit('Unknown type')
    print 'Following styles are used:'
for style in css:
print ' %s' % style
    print 'Following JavaScript is used:'
for script in js:
print ' %s' % script
##
# Copy the files
#
try:
shutil.rmtree(output_dir)
except:
pass
os.mkdir(output_dir)
def concatFiles(f_list):
data = []
for f in f_list:
data.append(open(f, 'r').read())
return '\n\n'.join(data)
def copyFiles(f_list):
for f in f_list:
shutil.copy(f, output_dir)
copyFiles(static)
fp = open('%s/%s' % (output_dir, 'gb_styles.css'), 'w')
fp.write(concatFiles(css))
fp.close()
print 'Compressed styles in %s' % ('greybox/gb_styles.css')
##
# Concat js
#
fp = open('%s/%s' % (output_dir, 'gb_scripts_tmp.js'), 'w')
fp.write(concatFiles(js))
fp.close()
AJS_minify.AJS_SRC = 'greybox_source/base/AJS.js'
AJS_minify.AJS_MINI_SRC = 'greybox/AJS_tmp.js'
files = ['greybox/gb_scripts_tmp.js', 'greybox_source/base/AJS_fx.js', 'static_files/help.js']
code_analyzer = AJS_minify.ExternalCodeAnalyzer(files)
composer = AJS_minify.AjsComposer(code_analyzer.findFunctions())
composer.writeToOutput()
os.popen('java -jar compression_lib/custom_rhino.jar -c greybox/AJS_tmp.js > greybox/AJS.js')
os.remove('greybox/AJS_tmp.js')
os.popen('java -jar compression_lib/custom_rhino.jar -c greybox_source/base/AJS_fx.js > greybox/AJS_fx.js')
    print 'Compressed AJS.js and AJS_fx.js into greybox/'
os.popen('java -jar compression_lib/custom_rhino.jar -c greybox/gb_scripts_tmp.js > greybox/gb_scripts.js')
os.remove('greybox/gb_scripts_tmp.js')
    print 'Compressed JavaScript in %s' % ('greybox/gb_scripts.js')
#Append script_loaded
open('greybox/AJS.js', 'a').write('\nscript_loaded=true;')
open('greybox/AJS_fx.js', 'a').write('\nscript_loaded=true;')
open('greybox/gb_scripts.js', 'a').write('\nscript_loaded=true;')
| gpl-2.0 |
omegachysis/arche-engine | arche/image.py | 1 | 11087 |
_panda = False
try:
import pygame
from pygame import locals
except:
_panda = True
import logging
log = logging.getLogger("R.Surface")
def scaleImage(surface, width, height):
""" Return surface scaled to fit width and height. """
#log.debug("scaled image %s" % repr(surface))
return pygame.transform.smoothscale(surface, (width, height))
def profilerRecordImageSurfaces():
log.info("PERFORMANCE PROFILER ENGAGED: RecordImageSurfaces")
ImageSurface.debugRecordSurfaces = True
def profilerRevealPixelAlpha():
log.info("PERFORMANCE PROFILER ENGAGED: RevealPixelAlpha")
ImageSurface.debugRevealPixelAlpha = True
for surf in ImageSurface.imageSurfaces:
surf.refresh()
if not ImageSurface.debugRecordSurfaces:
        log.warning("PERFORMANCE PROFILER FAILED: Not recording surfaces; " +
                    "inconsistencies may occur.")
def createDefaultSurface():
surface = pygame.Surface((1,1))
surface.fill((255,255,255,255))
return surface
newDefaultSurface = createDefaultSurface
def newRectangle(width, height, color = (255,255,255)):
surface = pygame.Surface((width, height))
surface.fill(color)
return surface
class _ImageRect(object):
def __init__(self, x, y, width, height):
self.x = x
self.y = y
self.width = width
self.height = height
class ImageSurfacePanda(object):
def __init__(self, surface, pixelAlpha=True):
if isinstance(surface, str):
self.surface = loader.loadTexture(surface)
def getSurface(self):
return self._surface
def setSurface(self, value):
self._surface = value
self._rect = _ImageRect(0, 0, self.width, self.height)
surface = property(getSurface, setSurface)
def getWidth(self):
return self._surface.getSimpleXSize()
def getHeight(self):
return self._surface.getSimpleYSize()
width = property(getWidth)
height = property(getHeight)
def rect(self):
try:
return self._rect
except:
return None
def refresh(self):
pass
class ImageCanvas(object):
def __init__(self, pygameSurface):
self.composite = pygameSurface.convert()
self.clip = None
def convert(self):
return self.composite.convert()
def convertAlpha(self):
return self.composite.convert_alpha()
def refresh(self):
pass
def rect(self):
return self.composite.get_rect()
def get(self):
return self.composite
class ImageSurface(object):
imageSurfaces = []
debugRecordSurfaces = False
debugRevealPixelAlpha = False
if debugRevealPixelAlpha:
log.debug("PERFORMANCE PROFILER ENGAGED: RevealPixelAlpha")
def __init__(self, surface, pixelAlpha=True):
if ImageSurface.debugRecordSurfaces:
ImageSurface.imageSurfaces.append(self)
if isinstance(surface, str):
surface = pygame.image.load(surface)
elif isinstance(surface, ImageSurface):
surface = surface.source
if surface:
if not pixelAlpha:
self._surface = surface.convert()
else:
self._surface = surface.convert_alpha()
else:
self._surface = None
self.composite = None
self._modScale = None
self._modColor = None
self._pixelAlpha = pixelAlpha
if self._surface:
self._width = self._surface.get_width()
self._height = self._surface.get_height()
else:
self._width = 0
self._height = 0
self._red = 255
self._green = 255
self._blue = 255
self._alpha = 255
if self._surface:
self.refresh()
_clip = None
_clipX = 0
_clipY = 0
def convert(self):
return self.composite.convert()
def convertAlpha(self):
return self.composite.convert_alpha()
def getPixel(self, x, y):
return self.get().get_at((x,y))
def copy(self):
return ImageSurface(self, self._pixelAlpha)
def resetClip(self):
self.setClip((0,0,self.getWidth(),self.getHeight()))
def removeClip(self):
self.setClip(None)
def getClip(self):
return self._clip
def setClip(self, value):
if value:
self._clipX = value[0]
self._clipY = value[1]
self.applyClip()
self._clip = value
clip = property(getClip, setClip)
def getClipX(self):
return self._clipX
def setClipX(self, value):
if not self._clip:
self.resetClip()
self._clipX = value
clip = self.getClip()
self.setClip((value, clip[1], clip[2], clip[3]))
clipX = property(getClipX, setClipX)
def getClipY(self):
return self._clipY
def setClipY(self, value):
if not self._clip:
self.resetClip()
self._clipY = value
clip = self.getClip()
self.setClip((clip[0], value, clip[2], clip[3]))
clipY = property(getClipY, setClipY)
def setAllowPixelAlpha(self, allowPixelAlpha):
if allowPixelAlpha != self._pixelAlpha:
if allowPixelAlpha:
self._surface = self._surface.convert_alpha()
else:
self._surface = self._surface.convert()
self._pixelAlpha = allowPixelAlpha
def getAllowPixelAlpha(self):
return self._pixelAlpha
allowPixelAlpha = property(getAllowPixelAlpha, setAllowPixelAlpha)
def _revealPixelAlpha(self):
if self._pixelAlpha:
surface = pygame.Surface((self._width, self._height)).convert_alpha()
surface.fill((255,0,0,255))
return surface
else:
surface = pygame.Surface((self._width, self._height)).convert()
surface.fill((0,255,0,255))
return surface
def refresh(self):
""" Apply all modified image parameters. """
if self.source:
self.applyScale()
def replace(self, surface, normalize=True):
""" Replace source surface with another. """
if ImageSurface.debugRevealPixelAlpha:
surface = self._revealPixelAlpha()
if not self._pixelAlpha:
self._surface = surface.convert()
else:
self._surface = surface.convert_alpha()
self.refresh()
if normalize:
self.normalize()
def permeate(self):
""" Set the source image surface to the current composite surface. """
self.source = self.composite
def normalize(self):
""" Reset scaling parameters to fit source surface. """
self.size = self._surface.get_size()
def get(self):
""" Get the finished composite surface. """
return self.composite
def rect(self):
""" Get rectangle of compsite surface. """
if self.composite:
return self.composite.get_rect()
else:
return pygame.Rect((0,0,1,1))
def convert(self):
""" Return a converted version of the source surface. """
if not self._pixelAlpha:
return self._surface.convert()
else:
return self._surface.convert_alpha()
def applyScale(self):
# This is a slow pass. Do this as little as possible.
self._modScale = scaleImage(self._surface, int(self._width), int(self._height))
if ImageSurface.debugRevealPixelAlpha:
if self._pixelAlpha:
self._modScale.fill((255,0,0,255))
else:
self._modScale.fill((0,255,0,255))
self.applyColor()
self.applyAlpha()
self.applyClip()
def applyColor(self):
# This is a semi fast pass. Use the scaling slow passed image.
if not ImageSurface.debugRevealPixelAlpha:
if not self._pixelAlpha:
self._modColor = self._modScale.convert()
self._modColor.fill((self._red, self._green, self._blue),
None, locals.BLEND_RGB_MULT)
self.applyAlpha()
else:
self._modColor = self._modScale.convert_alpha()
self._modColor.fill((self._red, self._green, self._blue, self._alpha),
None, locals.BLEND_RGBA_MULT)
self.composite = self._modColor
else:
self.composite = self._modScale
def applyAlpha(self):
# This is a fast pass. Use the double passed image from scale and color.
if not ImageSurface.debugRevealPixelAlpha:
if not self._pixelAlpha:
self._modColor.set_alpha(self._alpha)
self.composite = self._modColor
else:
self.applyColor()
else:
self.composite = self._modScale
def applyClip(self):
# This is a very fast pass. Use the triple passed image from scale, color, and alpha
image = self._modColor
image.set_clip(self._clip)
self.composite = image
def getSource(self):
return self._surface
def setSource(self, source):
self.replace(source, True)
source = property(getSource, setSource)
image = property(getSource, setSource)
def getWidth(self):
return self._width
def setWidth(self, width):
self._width = width
self.applyScale()
width = property(getWidth, setWidth)
def getHeight(self):
return self._height
def setHeight(self, height):
self._height = height
self.applyScale()
height = property(getHeight, setHeight)
def getSize(self):
return (self._width, self._height)
def setSize(self, size):
self._width = size[0]
self._height = size[1]
self.applyScale()
size = property(getSize, setSize)
def setScale(self, scalar):
self.setSize((self.getWidth() * scalar, self.getHeight() * scalar))
def getRed(self):
return self._red
def setRed(self, red):
self._red = red
self.applyColor()
red = property(getRed, setRed)
def getGreen(self):
return self._green
def setGreen(self, green):
self._green = green
self.applyColor()
green = property(getGreen, setGreen)
def getBlue(self):
return self._blue
def setBlue(self, blue):
self._blue = blue
self.applyColor()
blue = property(getBlue, setBlue)
def getAlpha(self):
return self._alpha
def setAlpha(self, alpha):
self._alpha = alpha
self.applyAlpha()
alpha = property(getAlpha, setAlpha)
def getColor(self):
return (self._red, self._green, self._blue)
def setColor(self, color):
self._red = color[0]
self._green = color[1]
self._blue = color[2]
self.applyColor()
color = property(getColor, setColor)
if _panda:
ImageSurface = ImageSurfacePanda | apache-2.0 |
idl3r/P8000-Kernel | Documentation/target/tcm_mod_builder.py | 2358 | 40707 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
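# The scanner above keeps any struct target_core_fabric_ops member whose
# declaration contains "(*", i.e. lines shaped like this illustrative one:
#
#   int (*shutdown_session)(struct se_session *);
#
# tcm_mod_dump_fabric_ops() below pattern-matches those saved lines to emit
# a matching stub for each callback.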
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
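# Note: every stub emitted above is a deliberate no-op placeholder (it
# returns 0, 1, NULL or void); a real fabric driver is expected to replace
# these bodies with protocol-specific logic after generation.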
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
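# Example invocation (illustrative module name; per the kernel docs the
# script is run from Documentation/target/ inside a kernel source tree):
#
#   python tcm_mod_builder.py -m tcm_nab5000 -p iSCSI
#
# This creates drivers/target/tcm_nab5000/ containing the generated
# *_base.h, *_fabric.c/.h, *_configfs.c, Kconfig and Makefile, and
# optionally wires them into drivers/target/Kconfig and Makefile.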
| gpl-2.0 |
hujiajie/chromium-crosswalk | third_party/WebKit/LayoutTests/http/tests/websocket/unicode_wsh.py | 42 | 2082 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from mod_pywebsocket import msgutil
# Hello in Japanese
_UNICODE_HELLO = u'\u3053\u3093\u306b\u3061\u306f'
# Goodbye in Japanese
_UNICODE_GOODBYE = u'\u3055\u3088\u3046\u306a\u3089'
# Error message
_ERROR_MESSAGE = 'What did you say?'
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
hello = msgutil.receive_message(request)
if hello != _UNICODE_HELLO:
msgutil.send_message(request, _ERROR_MESSAGE)
return
msgutil.send_message(request, _UNICODE_GOODBYE)
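# mod_pywebsocket locates handlers through these two well-known entry
# points: web_socket_do_extra_handshake() may veto the handshake (typically
# by raising), and web_socket_transfer_data() runs for the lifetime of the
# connection, so returning from it (as on the error path above) ends the
# WebSocket session.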
| bsd-3-clause |
jalexvig/tensorflow | tensorflow/contrib/factorization/python/kernel_tests/masked_matmul_ops_test.py | 111 | 3662 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
# ==============================================================================
"""Tests for masked_matmul_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-bad-todo, g-import-not-at-top
import numpy as np
from tensorflow.contrib.factorization.python.ops import gen_factorization_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def MakeMask():
inds = [[0, 0], [0, 2], [1, 1], [2, 0], [2, 3]] * 100
indices = np.array(inds).astype(np.int64)
shape = np.array([5, 4]).astype(np.int64)
return (indices, shape)
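# The mask lists the (row, col) pairs at which masked_matmul evaluates the
# product; e.g. entry [0, 0] selects dot(a[0], b[:, 0]) = 0.14 and [0, 2]
# selects dot(a[0], b[:, 2]) = 0.5, matching self._dot_products in setUp()
# below. The five pairs are repeated 100x only to exercise a larger mask.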
class MaskedProductOpsTest(test.TestCase):
def setUp(self):
a = [
[0.1, 0.2, 0.3],
[0.4, 0.5, 0.6],
[0.7, 0.8, 0.9],
[1.1, 1.2, 1.3],
[1.4, 1.5, 1.6],
]
b = [
[0.1, 0.4, 0.7, 1.1],
[0.2, 0.5, 0.8, 1.2],
[0.3, 0.6, 0.9, 1.3],
]
self._dot_products = np.array([0.14, 0.5, 0.77, 0.5, 2.9] * 100)
self._a = np.array(a).astype(np.float32)
self._b = np.array(b).astype(np.float32)
self._mask_ind, self._mask_shape = MakeMask()
def _runTestMaskedProduct(self, transpose_a, transpose_b):
with ops.Graph().as_default(), self.test_session() as sess:
a = self._a if not transpose_a else array_ops.transpose(self._a)
b = self._b if not transpose_b else array_ops.transpose(self._b)
def AssertClose(sp_x, sp_y):
x_inds, x_vals, y_inds, y_vals = sess.run(
[sp_x.indices, sp_x.values,
sp_y.indices, sp_y.values])
self.assertAllClose(x_inds, y_inds)
self.assertAllClose(x_vals, y_vals)
values = gen_factorization_ops.masked_matmul(
a, b, self._mask_ind, transpose_a, transpose_b)
result = sparse_tensor.SparseTensor(
self._mask_ind, values, self._mask_shape)
true_result = sparse_tensor.SparseTensor(
self._mask_ind, self._dot_products, self._mask_shape)
AssertClose(result, true_result)
def _runTestEmptyMaskedProduct(self):
with ops.Graph().as_default(), self.test_session() as sess:
empty_mask = constant_op.constant(0, shape=[0, 2], dtype=dtypes.int64)
values = gen_factorization_ops.masked_matmul(
self._a, self._b, empty_mask, False, False)
self.assertEqual(len(values.eval(session=sess)), 0)
def testMaskedProduct(self):
self._runTestMaskedProduct(False, False)
def testMaskedProductTransposeA(self):
self._runTestMaskedProduct(True, False)
def testMaskedProductTransposeB(self):
self._runTestMaskedProduct(False, True)
def testMaskedProductTransposeAAndB(self):
self._runTestMaskedProduct(True, True)
def testEmptyMaskedProduct(self):
self._runTestEmptyMaskedProduct()
if __name__ == "__main__":
test.main()
| apache-2.0 |
nanditav/15712-TensorFlow | tensorflow/python/summary/impl/directory_watcher_test.py | 37 | 6719 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for directory_watcher."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import shutil
from tensorflow.python.framework import test_util
from tensorflow.python.platform import gfile
from tensorflow.python.platform import googletest
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import io_wrapper
class _ByteLoader(object):
"""A loader that loads individual bytes from a file."""
def __init__(self, path):
self._f = open(path)
self.bytes_read = 0
def Load(self):
while True:
self._f.seek(self.bytes_read)
byte = self._f.read(1)
if byte:
self.bytes_read += 1
yield byte
else:
return
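# Loader contract the watcher relies on (illustrative): Load() yields items
# appended since the previous call and then returns, so successive calls
# resume at the remembered offset:
#
#   loader = _ByteLoader(path_to_file_containing_ab)  # hypothetical path
#   list(loader.Load())  # -> ['a', 'b']
#   list(loader.Load())  # -> [] until more bytes are appended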
class DirectoryWatcherTest(test_util.TensorFlowTestCase):
def setUp(self):
# Put everything in a directory so it's easier to delete.
self._directory = os.path.join(self.get_temp_dir(), 'monitor_dir')
os.mkdir(self._directory)
self._watcher = directory_watcher.DirectoryWatcher(self._directory,
_ByteLoader)
self.stubs = googletest.StubOutForTesting()
def tearDown(self):
self.stubs.CleanUp()
try:
shutil.rmtree(self._directory)
except OSError:
# Some tests delete the directory.
pass
def _WriteToFile(self, filename, data):
path = os.path.join(self._directory, filename)
with open(path, 'a') as f:
f.write(data)
def _LoadAllEvents(self):
"""Loads all events in the watcher."""
for _ in self._watcher.Load():
pass
def assertWatcherYields(self, values):
self.assertEqual(list(self._watcher.Load()), values)
def testRaisesWithBadArguments(self):
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher(None, lambda x: None)
with self.assertRaises(ValueError):
directory_watcher.DirectoryWatcher('dir', None)
def testEmptyDirectory(self):
self.assertWatcherYields([])
def testSingleWrite(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleWrites(self):
self._WriteToFile('a', 'abc')
self.assertWatcherYields(['a', 'b', 'c'])
self._WriteToFile('a', 'xyz')
self.assertWatcherYields(['x', 'y', 'z'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleLoads(self):
self._WriteToFile('a', 'a')
self._watcher.Load()
self._watcher.Load()
self.assertWatcherYields(['a'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testMultipleFilesAtOnce(self):
self._WriteToFile('b', 'b')
self._WriteToFile('a', 'a')
self.assertWatcherYields(['a', 'b'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testFinishesLoadingFileWhenSwitchingToNewFile(self):
self._WriteToFile('a', 'a')
# Empty the iterator.
self.assertEqual(['a'], list(self._watcher.Load()))
self._WriteToFile('a', 'b')
self._WriteToFile('b', 'c')
# The watcher should finish its current file before starting a new one.
self.assertWatcherYields(['b', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testIntermediateEmptyFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', '')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testPathFilter(self):
self._watcher = directory_watcher.DirectoryWatcher(
self._directory, _ByteLoader,
lambda path: 'do_not_watch_me' not in path)
self._WriteToFile('a', 'a')
self._WriteToFile('do_not_watch_me', 'b')
self._WriteToFile('c', 'c')
self.assertWatcherYields(['a', 'c'])
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsNewOldFiles(self):
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testIgnoresNewerFiles(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
self._WriteToFile('q', 'a')
self._LoadAllEvents()
self.assertFalse(self._watcher.OutOfOrderWritesDetected())
def testDetectsChangingOldFiles(self):
self._WriteToFile('a', 'a')
self._WriteToFile('b', 'a')
self._LoadAllEvents()
self._WriteToFile('a', 'c')
self._LoadAllEvents()
self.assertTrue(self._watcher.OutOfOrderWritesDetected())
def testDoesntCrashWhenFileIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
os.remove(os.path.join(self._directory, 'a'))
self._WriteToFile('b', 'b')
self.assertWatcherYields(['b'])
def testRaisesRightErrorWhenDirectoryIsDeleted(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
with self.assertRaises(directory_watcher.DirectoryDeletedError):
self._LoadAllEvents()
def testDoesntRaiseDirectoryDeletedErrorIfOutageIsTransient(self):
self._WriteToFile('a', 'a')
self._LoadAllEvents()
shutil.rmtree(self._directory)
# Fake a single transient I/O error.
def FakeFactory(original):
def Fake(*args, **kwargs):
if FakeFactory.has_been_called:
original(*args, **kwargs)
else:
raise OSError('lp0 temporarily on fire')
return Fake
FakeFactory.has_been_called = False
for stub_name in ['ListDirectoryAbsolute', 'ListRecursively']:
self.stubs.Set(io_wrapper, stub_name,
FakeFactory(getattr(io_wrapper, stub_name)))
for stub_name in ['IsDirectory', 'Exists', 'Stat']:
self.stubs.Set(gfile, stub_name,
FakeFactory(getattr(gfile, stub_name)))
with self.assertRaises((IOError, OSError)):
self._LoadAllEvents()
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
zasdfgbnm/tensorflow | tensorflow/contrib/distributions/python/ops/conditional_distribution.py | 120 | 3310 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Conditional distribution base class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import distribution
from tensorflow.python.ops.distributions import util as distribution_util
class ConditionalDistribution(distribution.Distribution):
"""Distribution that supports intrinsic parameters (local latents).
Subclasses of this distribution may have additional keyword arguments passed
to their sample-based methods (i.e. `sample`, `log_prob`, etc.).
"""
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def sample(self, sample_shape=(), seed=None, name="sample",
**condition_kwargs):
return self._call_sample_n(sample_shape, seed, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_prob(self, value, name="log_prob", **condition_kwargs):
return self._call_log_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def prob(self, value, name="prob", **condition_kwargs):
return self._call_prob(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_cdf(self, value, name="log_cdf", **condition_kwargs):
return self._call_log_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def cdf(self, value, name="cdf", **condition_kwargs):
return self._call_cdf(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def log_survival_function(self, value, name="log_survival_function",
**condition_kwargs):
return self._call_log_survival_function(value, name, **condition_kwargs)
@distribution_util.AppendDocstring(kwargs_dict={
"**condition_kwargs":
"Named arguments forwarded to subclass implementation."})
def survival_function(self, value, name="survival_function",
**condition_kwargs):
return self._call_survival_function(value, name, **condition_kwargs)
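# A minimal sketch of how a subclass might consume condition_kwargs
# (illustrative only; `MyConditional` and the latent `z` are hypothetical):
#
#   class MyConditional(ConditionalDistribution):
#     def _sample_n(self, n, seed=None, z=None):
#       # `z` arrives here via dist.sample(..., z=...) through
#       # _call_sample_n's **condition_kwargs forwarding.
#       ...
#
#   dist = MyConditional(...)
#   samples = dist.sample([5], z=my_latent)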
| apache-2.0 |
fargalaxy1/geonode-wagtail | geonode/contrib/metadataxsl/views.py | 12 | 1479 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.shortcuts import get_object_or_404
from django.http import HttpResponse
from django.conf import settings
from geonode.base.models import ResourceBase
from geonode.catalogue import get_catalogue
def prefix_xsl_line(req, id):
resource = get_object_or_404(ResourceBase, pk=id)
catalogue = get_catalogue()
record = catalogue.get_record(resource.uuid)
xml = record.xml
xsl_path = '{}/static/metadataxsl/metadata.xsl'.format(settings.SITEURL)
xsl_line = '<?xml-stylesheet type="text/xsl" href="{}"?>'.format(xsl_path)
return HttpResponse(
xsl_line + xml,
content_type="text/xml"
)
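# Illustrative URL wiring for this view (the real pattern lives in the
# app's urls.py; the regex and name shown here are assumptions):
#
#   url(r'^showmetadata/xsl/(\d+)$', prefix_xsl_line, name='prefix_xsl_line')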
| gpl-3.0 |
silly-wacky-3-town-toon/SOURCE-COD | toontown/battle/DistributedBattleFinal.py | 1 | 7784 | from panda3d.core import *
from panda3d.direct import *
from direct.interval.IntervalGlobal import *
from BattleBase import *
from direct.actor import Actor
from toontown.distributed import DelayDelete
from direct.directnotify import DirectNotifyGlobal
import DistributedBattleBase
import MovieUtil
from toontown.suit import Suit
import SuitBattleGlobals
from toontown.toonbase import ToontownBattleGlobals
from toontown.toonbase import ToontownGlobals
from direct.fsm import State
import random
from otp.nametag.NametagConstants import *
from otp.nametag import NametagGlobals
class DistributedBattleFinal(DistributedBattleBase.DistributedBattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleFinal')
def __init__(self, cr):
townBattle = cr.playGame.hood.loader.townBattle
DistributedBattleBase.DistributedBattleBase.__init__(self, cr, townBattle)
self.setupCollisions(self.uniqueBattleName('battle-collide'))
self.bossCog = None
self.bossCogRequest = None
self.streetBattle = 0
self.joiningSuitsName = self.uniqueBattleName('joiningSuits')
self.fsm.addState(State.State('ReservesJoining', self.enterReservesJoining, self.exitReservesJoining, ['WaitForJoin']))
offState = self.fsm.getStateNamed('Off')
offState.addTransition('ReservesJoining')
waitForJoinState = self.fsm.getStateNamed('WaitForJoin')
waitForJoinState.addTransition('ReservesJoining')
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('ReservesJoining')
return
def generate(self):
DistributedBattleBase.DistributedBattleBase.generate(self)
def disable(self):
DistributedBattleBase.DistributedBattleBase.disable(self)
base.cr.relatedObjectMgr.abortRequest(self.bossCogRequest)
self.bossCogRequest = None
self.bossCog = None
return
def delete(self):
DistributedBattleBase.DistributedBattleBase.delete(self)
self.removeCollisionData()
def setBossCogId(self, bossCogId):
self.bossCogId = bossCogId
if base.cr.doId2do.has_key(bossCogId):
tempBossCog = base.cr.doId2do[bossCogId]
self.__gotBossCog([tempBossCog])
else:
self.notify.debug('doing relatedObjectMgr.request for bossCog')
self.bossCogRequest = base.cr.relatedObjectMgr.requestObjects([bossCogId], allCallback=self.__gotBossCog)
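    # Added note: setBossCogId() resolves the boss cog synchronously when it
    # is already in the local repository cache (base.cr.doId2do); otherwise
    # it files a relatedObjectMgr request so __gotBossCog() runs once the
    # distributed object arrives from the server.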
def __gotBossCog(self, bossCogList):
self.bossCogRequest = None
self.bossCog = bossCogList[0]
currStateName = self.localToonFsm.getCurrentState().getName()
if currStateName == 'NoLocalToon' and self.bossCog.hasLocalToon():
self.enableCollision()
return
def setBattleNumber(self, battleNumber):
self.battleNumber = battleNumber
def setBattleSide(self, battleSide):
self.battleSide = battleSide
def setMembers(self, suits, suitsJoining, suitsPending, suitsActive, suitsLured, suitTraps, toons, toonsJoining, toonsPending, toonsActive, toonsRunning, timestamp):
if self.battleCleanedUp():
return
oldtoons = DistributedBattleBase.DistributedBattleBase.setMembers(self, suits, suitsJoining, suitsPending, suitsActive, suitsLured, suitTraps, toons, toonsJoining, toonsPending, toonsActive, toonsRunning, timestamp)
if len(self.toons) == 4 and len(oldtoons) < 4:
self.notify.debug('setMembers() - battle is now full of toons')
self.closeBattleCollision()
elif len(self.toons) < 4 and len(oldtoons) == 4:
self.openBattleCollision()
def makeSuitJoin(self, suit, ts):
self.notify.debug('makeSuitJoin(%d)' % suit.doId)
self.joiningSuits.append(suit)
if self.hasLocalToon():
self.d_joinDone(base.localAvatar.doId, suit.doId)
def showSuitsJoining(self, suits, ts, name, callback):
if self.bossCog == None:
return
if self.battleSide:
openDoor = Func(self.bossCog.doorB.request, 'open')
closeDoor = Func(self.bossCog.doorB.request, 'close')
else:
openDoor = Func(self.bossCog.doorA.request, 'open')
closeDoor = Func(self.bossCog.doorA.request, 'close')
suitTrack = Parallel()
delay = 0
for suit in suits:
suit.setState('Battle')
if suit.dna.dept == 'l':
suit.reparentTo(self.bossCog)
suit.setPos(0, 0, 0)
suit.setPos(self.bossCog, 0, 0, 0)
suit.headsUp(self)
suit.setScale(3.8 / suit.height)
if suit in self.joiningSuits:
i = len(self.pendingSuits) + self.joiningSuits.index(suit)
destPos, h = self.suitPendingPoints[i]
destHpr = VBase3(h, 0, 0)
else:
destPos, destHpr = self.getActorPosHpr(suit, self.suits)
suitTrack.append(Track((delay, self.createAdjustInterval(suit, destPos, destHpr)), (delay + 1.5, suit.scaleInterval(1.5, 1))))
delay += 1
if self.hasLocalToon():
camera.reparentTo(self)
if random.choice([0, 1]):
camera.setPosHpr(20, -4, 7, 60, 0, 0)
else:
camera.setPosHpr(-20, -4, 7, -60, 0, 0)
done = Func(callback)
track = Sequence(openDoor, suitTrack, closeDoor, done, name=name)
track.start(ts)
self.storeInterval(track, name)
return
def __playReward(self, ts, callback):
toonTracks = Parallel()
for toon in self.toons:
toonTracks.append(Sequence(Func(toon.loop, 'victory'), Wait(FLOOR_REWARD_TIMEOUT), Func(toon.loop, 'neutral')))
name = self.uniqueName('floorReward')
track = Sequence(toonTracks, name=name)
if self.hasLocalToon():
camera.setPos(0, 0, 1)
camera.setHpr(180, 10, 0)
track += [self.bossCog.makeEndOfBattleMovie(self.hasLocalToon()), Func(callback)]
self.storeInterval(track, name)
track.start(ts)
def enterReward(self, ts):
self.notify.debug('enterReward()')
self.disableCollision()
self.delayDeleteMembers()
self.__playReward(ts, self.__handleFloorRewardDone)
return None
def __handleFloorRewardDone(self):
return None
def exitReward(self):
self.notify.debug('exitReward()')
self.clearInterval(self.uniqueName('floorReward'), finish=1)
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
for toon in self.toons:
toon.startSmooth()
return None
def enterResume(self, ts = 0):
if self.hasLocalToon():
self.removeLocalToon()
self.fsm.requestFinalState()
def exitResume(self):
return None
def enterReservesJoining(self, ts = 0):
self.delayDeleteMembers()
self.showSuitsJoining(self.joiningSuits, ts, self.joiningSuitsName, self.__reservesJoiningDone)
def __reservesJoiningDone(self):
self._removeMembersKeep()
self.doneBarrier()
def exitReservesJoining(self):
self.clearInterval(self.joiningSuitsName)
def enterNoLocalToon(self):
self.notify.debug('enterNoLocalToon()')
if self.bossCog != None and self.bossCog.hasLocalToon():
self.enableCollision()
else:
self.disableCollision()
return
def exitNoLocalToon(self):
self.disableCollision()
return None
def enterWaitForServer(self):
self.notify.debug('enterWaitForServer()')
return None
def exitWaitForServer(self):
return None
| apache-2.0 |
SamiHiltunen/invenio-formatter | invenio_formatter/bundles.py | 9 | 1409 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Formatter bundles."""
from __future__ import unicode_literals
from invenio.ext.assets import Bundle
css = Bundle(
"css/formatter/templates_brief.css",
"css/formatter/templates_detailed.css",
output="formatter.css",
weight=60,
filters="cleancss",
bower={
# Used by Video_HTML_detailed.
"mediaelement": "latest",
# 1.4.0 is from 2011 !!!
# ----------------------
# http://www.keith-wood.name/bookmark.html
"jquery.bookmark": "http://invenio-software.org/download/jquery/"
"jquery.bookmark.package-1.4.0.zip" # orphan
}
)
| gpl-2.0 |
bwrsandman/OpenUpgrade | addons/base_import/models.py | 222 | 14243 | import csv
import itertools
import logging
import operator
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import psycopg2
from openerp.osv import orm, fields
from openerp.tools.translate import _
FIELDS_RECURSION_LIMIT = 2
ERROR_PREVIEW_BYTES = 200
_logger = logging.getLogger(__name__)
class ir_import(orm.TransientModel):
_name = 'base_import.import'
# allow imports to survive for 12h in case user is slow
_transient_max_hours = 12.0
_columns = {
'res_model': fields.char('Model'),
'file': fields.binary(
'File', help="File to check and/or import, raw binary (not base64)"),
'file_name': fields.char('File Name'),
'file_type': fields.char('File Type'),
}
def get_fields(self, cr, uid, model, context=None,
depth=FIELDS_RECURSION_LIMIT):
""" Recursively get fields for the provided model (through
fields_get) and filter them according to importability
The output format is a list of ``Field``, with ``Field``
defined as:
.. class:: Field
.. attribute:: id (str)
A non-unique identifier for the field, used to compute
the span of the ``required`` attribute: if multiple
``required`` fields have the same id, only one of them
is necessary.
.. attribute:: name (str)
The field's logical (Odoo) name within the scope of
its parent.
.. attribute:: string (str)
The field's human-readable name (``@string``)
.. attribute:: required (bool)
Whether the field is marked as required in the
model. Clients must provide non-empty import values
for all required fields or the import will error out.
.. attribute:: fields (list(Field))
The current field's subfields. The database and
external identifiers for m2o and m2m fields; a
filtered and transformed fields_get for o2m fields (to
a variable depth defined by ``depth``).
Fields with no sub-fields will have an empty list of
sub-fields.
        :param str model: name of the model to get fields from
        :param int depth: depth of recursion into o2m fields
"""
model_obj = self.pool[model]
fields = [{
'id': 'id',
'name': 'id',
'string': _("External ID"),
'required': False,
'fields': [],
}]
fields_got = model_obj.fields_get(cr, uid, context=context)
blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD]
for name, field in fields_got.iteritems():
if name in blacklist:
continue
# an empty string means the field is deprecated, @deprecated must
# be absent or False to mean not-deprecated
if field.get('deprecated', False) is not False:
continue
if field.get('readonly'):
states = field.get('states')
if not states:
continue
# states = {state: [(attr, value), (attr2, value2)], state2:...}
if not any(attr == 'readonly' and value is False
for attr, value in itertools.chain.from_iterable(
states.itervalues())):
continue
f = {
'id': name,
'name': name,
'string': field['string'],
# Y U NO ALWAYS HAS REQUIRED
'required': bool(field.get('required')),
'fields': [],
}
if field['type'] in ('many2many', 'many2one'):
f['fields'] = [
dict(f, name='id', string=_("External ID")),
dict(f, name='.id', string=_("Database ID")),
]
elif field['type'] == 'one2many' and depth:
f['fields'] = self.get_fields(
cr, uid, field['relation'], context=context, depth=depth-1)
if self.pool['res.users'].has_group(cr, uid, 'base.group_no_one'):
f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []})
fields.append(f)
# TODO: cache on model?
return fields
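    # Illustrative sketch, not part of the original module: for a minimal
    # model with a single required char field, get_fields() returns a
    # structure shaped roughly like this (field names hypothetical):
    #
    #   [{'id': 'id', 'name': 'id', 'string': 'External ID',
    #     'required': False, 'fields': []},
    #    {'id': 'name', 'name': 'name', 'string': 'Name',
    #     'required': True, 'fields': []}]
    #
    # m2o/m2m entries additionally carry 'id'/'.id' subfields, and o2m
    # entries recurse up to FIELDS_RECURSION_LIMIT levels deep.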
def _read_csv(self, record, options):
""" Returns a CSV-parsed iterator of all empty lines in the file
:throws csv.Error: if an error is detected during CSV parsing
:throws UnicodeDecodeError: if ``options.encoding`` is incorrect
"""
csv_iterator = csv.reader(
StringIO(record.file),
quotechar=str(options['quoting']),
delimiter=str(options['separator']))
def nonempty(row):
return any(x for x in row if x.strip())
csv_nonempty = itertools.ifilter(nonempty, csv_iterator)
# TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet
encoding = options.get('encoding', 'utf-8')
return itertools.imap(
lambda row: [item.decode(encoding) for item in row],
csv_nonempty)
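    # Minimal sketch of the ``options`` dict _read_csv() expects (values
    # below are illustrative, not enforced defaults):
    #
    #   options = {'quoting': '"', 'separator': ',',
    #              'headers': True, 'encoding': 'utf-8'}
    #
    # 'quoting' and 'separator' feed csv.reader directly, while 'encoding'
    # falls back to utf-8 when absent.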
def _match_header(self, header, fields, options):
""" Attempts to match a given header to a field of the
imported model.
:param str header: header name from the CSV file
:param fields:
:param dict options:
:returns: an empty list if the header couldn't be matched, or
all the fields to traverse
:rtype: list(Field)
"""
string_match = None
for field in fields:
# FIXME: should match all translations & original
# TODO: use string distance (levenshtein? hamming?)
if header.lower() == field['name'].lower():
return [field]
if header.lower() == field['string'].lower():
                # matching by string is not reliable because strings
                # have no unique constraint
string_match = field
if string_match:
# this behavior is only applied if there is no matching field['name']
return [string_match]
if '/' not in header:
return []
# relational field path
traversal = []
subfields = fields
# Iteratively dive into fields tree
for section in header.split('/'):
# Strip section in case spaces are added around '/' for
# readability of paths
match = self._match_header(section.strip(), subfields, options)
# Any match failure, exit
if not match: return []
# prep subfields for next iteration within match[0]
field = match[0]
subfields = field['fields']
traversal.append(field)
return traversal
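    # Hypothetical example of the relational-path matching above: a header
    # such as "band / name" is split on '/', each section stripped and
    # matched in turn, yielding the traversal [<field 'band'>,
    # <field 'name'>] -- or [] as soon as any section fails to match.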
def _match_headers(self, rows, fields, options):
""" Attempts to match the imported model's fields to the
titles of the parsed CSV file, if the file is supposed to have
headers.
Will consume the first line of the ``rows`` iterator.
        Returns a (None, None) pair if headers were not requested,
        otherwise the list of headers and a dict mapping cell indices
        to key paths in the ``fields`` tree.
:param Iterator rows:
:param dict fields:
:param dict options:
:rtype: (None, None) | (list(str), dict(int: list(str)))
"""
if not options.get('headers'):
return None, None
headers = next(rows)
return headers, dict(
(index, [field['name'] for field in self._match_header(header, fields, options)] or None)
for index, header in enumerate(headers)
)
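    # Sketch of a possible _match_headers() result for a two-column file
    # with headers enabled (column names made up for illustration):
    #
    #   headers == ['name', 'band / name']
    #   matches == {0: ['name'], 1: ['band', 'name']}
    #
    # Columns that match nothing map to None instead of a key path.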
def parse_preview(self, cr, uid, id, options, count=10, context=None):
""" Generates a preview of the uploaded files, and performs
fields-matching between the import's file data and the model's
columns.
If the headers are not requested (not options.headers),
``matches`` and ``headers`` are both ``False``.
:param id: identifier of the import
:param int count: number of preview lines to generate
:param options: format-specific options.
CSV: {encoding, quoting, separator, headers}
:type options: {str, str, str, bool}
:returns: {fields, matches, headers, preview} | {error, preview}
:rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
"""
(record,) = self.browse(cr, uid, [id], context=context)
fields = self.get_fields(cr, uid, record.res_model, context=context)
try:
rows = self._read_csv(record, options)
headers, matches = self._match_headers(rows, fields, options)
# Match should have consumed the first row (iif headers), get
# the ``count`` next rows for preview
preview = list(itertools.islice(rows, count))
assert preview, "CSV file seems to have no content"
return {
'fields': fields,
'matches': matches or False,
'headers': headers or False,
'preview': preview,
}
except Exception, e:
# Due to lazy generators, UnicodeDecodeError (for
# instance) may only be raised when serializing the
# preview to a list in the return.
_logger.debug("Error during CSV parsing preview", exc_info=True)
return {
'error': str(e),
# iso-8859-1 ensures decoding will always succeed,
# even if it yields non-printable characters. This is
# in case of UnicodeDecodeError (or csv.Error
# compounded with UnicodeDecodeError)
'preview': record.file[:ERROR_PREVIEW_BYTES]
                .decode('iso-8859-1'),
}
def _convert_import_data(self, record, fields, options, context=None):
""" Extracts the input browse_record and fields list (with
``False``-y placeholders for fields to *not* import) into a
format Model.import_data can use: a fields list without holes
and the precisely matching data matrix
:param browse_record record:
:param list(str|bool): fields
:returns: (data, fields)
:rtype: (list(list(str)), list(str))
:raises ValueError: in case the import data could not be converted
"""
# Get indices for non-empty fields
indices = [index for index, field in enumerate(fields) if field]
if not indices:
raise ValueError(_("You must configure at least one field to import"))
# If only one index, itemgetter will return an atom rather
# than a 1-tuple
if len(indices) == 1: mapper = lambda row: [row[indices[0]]]
else: mapper = operator.itemgetter(*indices)
# Get only list of actually imported fields
import_fields = filter(None, fields)
rows_to_import = self._read_csv(record, options)
if options.get('headers'):
rows_to_import = itertools.islice(
rows_to_import, 1, None)
data = [
row for row in itertools.imap(mapper, rows_to_import)
# don't try inserting completely empty rows (e.g. from
# filtering out o2m fields)
if any(row)
]
return data, import_fields
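    # Worked example (hypothetical data) of the mapping above: with
    # fields = ['name', False, 'email'], indices becomes [0, 2], so
    # mapper(['Bob', 'ignored', 'bob@example.com']) returns the tuple
    # ('Bob', 'bob@example.com') and import_fields ends up as
    # ['name', 'email'].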
def do(self, cr, uid, id, fields, options, dryrun=False, context=None):
""" Actual execution of the import
:param fields: import mapping: maps each column to a field,
``False`` for the columns to ignore
:type fields: list(str|bool)
:param dict options:
:param bool dryrun: performs all import operations (and
validations) but rollbacks writes, allows
getting as much errors as possible without
the risk of clobbering the database.
:returns: A list of errors. If the list is empty the import
executed fully and correctly. If the list is
non-empty it contains dicts with 3 keys ``type`` the
type of error (``error|warning``); ``message`` the
error message associated with the error (a string)
and ``record`` the data which failed to import (or
``false`` if that data isn't available or provided)
:rtype: list({type, message, record})
"""
cr.execute('SAVEPOINT import')
(record,) = self.browse(cr, uid, [id], context=context)
try:
data, import_fields = self._convert_import_data(
record, fields, options, context=context)
except ValueError, e:
return [{
'type': 'error',
'message': unicode(e),
'record': False,
}]
_logger.info('importing %d rows...', len(data))
import_result = self.pool[record.res_model].load(
cr, uid, import_fields, data, context=context)
_logger.info('done')
# If transaction aborted, RELEASE SAVEPOINT is going to raise
# an InternalError (ROLLBACK should work, maybe). Ignore that.
# TODO: to handle multiple errors, create savepoint around
# write and release it in case of write error (after
# adding error to errors array) => can keep on trying to
# import stuff, and rollback at the end if there is any
# error in the results.
try:
if dryrun:
cr.execute('ROLLBACK TO SAVEPOINT import')
else:
cr.execute('RELEASE SAVEPOINT import')
except psycopg2.InternalError:
pass
return import_result['messages']
| agpl-3.0 |
yograterol/django | tests/modeladmin/models.py | 108 | 1603 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
name = models.CharField(max_length=100)
bio = models.TextField()
sign_date = models.DateField()
class Meta:
ordering = ('name',)
def __str__(self):
return self.name
class Concert(models.Model):
main_band = models.ForeignKey(Band, models.CASCADE, related_name='main_concerts')
opening_band = models.ForeignKey(Band, models.CASCADE, related_name='opening_concerts',
blank=True)
day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
transport = models.CharField(max_length=100, choices=(
(1, 'Plane'),
(2, 'Train'),
(3, 'Bus')
), blank=True)
class ValidationTestModel(models.Model):
name = models.CharField(max_length=100)
slug = models.SlugField()
users = models.ManyToManyField(User)
state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
is_active = models.BooleanField(default=False)
pub_date = models.DateTimeField()
band = models.ForeignKey(Band, models.CASCADE)
no = models.IntegerField(verbose_name="Number", blank=True, null=True) # This field is intentionally 2 characters long. See #16080.
def decade_published_in(self):
return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
parent = models.ForeignKey(ValidationTestModel, models.CASCADE)
| bsd-3-clause |
mkieszek/jobsplus | jobsplus_recruitment/wizard/jp_recruiter2deal.py | 1 | 4127 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 26 09:48:26 2013
@author: mbereda
"""
from openerp.osv import osv,fields
from openerp.tools.translate import _
import pdb
import datetime
class jp_recruiter2deal(osv.Model):
_name = 'jp.recruiter2deal'
_columns = {
'deal_id': fields.many2one('jp.deal','Deal', readonly=True),
'recruiter_id': fields.many2one('res.users', 'Recruiter', required=True),
'date_middle': fields.date('Middle date', track_visibility='onchange'),
'handover_date': fields.date('Handover date', track_visibility='onchange'),
}
    def default_get(self, cr, uid, fields_list, context=None):
"""
This function gets default values
"""
#pdb.set_trace()
        res = super(jp_recruiter2deal, self).default_get(cr, uid, fields_list, context=context)
deal_id = context and context.get('active_id', False) or False
res.update({'deal_id': deal_id or False})
deal = self.pool.get('jp.deal').browse(cr,uid,[deal_id], context=context)[0]
recruiter_id = deal.recruiter_id and deal.recruiter_id.id or False
res.update({'recruiter_id': recruiter_id})
date_middle = deal.date_middle
res.update({'date_middle': date_middle})
res.update({'handover_date': deal.handover_date})
return res
    def assign_recruiter(self, cr, uid, ids, context=None):
        #pdb.set_trace()
        w = self.browse(cr, uid, ids, context=context)[0]
        deal_id = context and context.get('active_id', False) or False
values = {
'recruiter_id': w.recruiter_id and w.recruiter_id.id or False,
'date_middle': w.date_middle,
'handover_date': w.handover_date,
}
recruiter_id = w.recruiter_id.id
recruiter = self.pool.get('res.users').browse(cr, uid, [recruiter_id])[0]
deal_obj = self.pool.get('jp.deal')
deal_obj.write(cr, uid, [deal_id], values, context=context)
mail_to = recruiter.email
        if mail_to:
deal = deal_obj.browse(cr, uid, deal_id)
users_obj = self.pool.get('res.users')
jp_config_obj = self.pool.get('jp.config.settings')
jp_config_id = jp_config_obj.search(cr, uid, [])[-1]
jp_crm = jp_config_obj.browse(cr, uid, jp_config_id).jobsplus_crm
url = ("http://%s/?db=%s#id=%s&view_type=form&model=jp.deal")%(jp_crm, cr.dbname, deal.id)
subject = "Recruiter %s assigned to deal %s"
body = "Recruiter has been assigned to deal.<br/>Recruiter: %s<br/>Deal: %s<br/>Middle date: %s<br/>Finish date: %s<br/><a href='%s'>Link to Deal</a>"
uid = users_obj.search(cr, uid, [('id','=',1)])[0]
uid_id = users_obj.browse(cr, uid, uid)
translation_obj = self.pool.get('ir.translation')
if w.recruiter_id.partner_id.lang == 'pl_PL':
transl = translation_obj.search(cr, uid, [('src','=',body)])
transl_sub = translation_obj.search(cr, uid, [('src','=',subject)])
if transl:
trans = translation_obj.browse(cr, uid, transl)[0]
body = trans.value
if transl_sub:
trans_sub = translation_obj.browse(cr, uid, transl_sub)[0]
subject = trans_sub.value
email_from = uid_id.partner_id.name+"<"+uid_id.partner_id.email+">"
vals = {'email_from': email_from,
'email_to': mail_to,
'state': 'outgoing',
'subject': subject % (w.recruiter_id.name, deal.title),
'body_html': body % (deal.recruiter_id.name, deal.name, deal.date_middle, deal.handover_date, url),
'auto_delete': True}
            self.pool.get('mail.mail').create(cr, uid, vals, context=context)
| agpl-3.0 |
ecosoft-odoo/odoo | addons/l10n_ma/l10n_ma.py | 336 | 1952 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_ma_report(osv.osv):
_name = 'l10n.ma.report'
_description = 'Report for l10n_ma_kzc'
_columns = {
'code': fields.char('Code', size=64),
'name': fields.char('Name'),
'line_ids': fields.one2many('l10n.ma.line', 'report_id', 'Lines', copy=True),
}
_sql_constraints = [
        ('code_uniq', 'unique (code)', 'The report code must be unique !')
]
class l10n_ma_line(osv.osv):
_name = 'l10n.ma.line'
_description = 'Report Lines for l10n_ma'
_columns = {
'code': fields.char('Variable Name', size=64),
'definition': fields.char('Definition'),
'name': fields.char('Name'),
'report_id': fields.many2one('l10n.ma.report', 'Report'),
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The variable name must be unique !')
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MSylvia/pyNES | pynes/tests/commandline_test.py | 28 | 3400 | # -*- coding: utf-8 -*-
from pynes import main
from pynes.tests import FileTestCase
from mock import patch
class CommandLineTest(FileTestCase):
@patch('pynes.compiler.compile_file')
def test_asm(self, compiler):
main("pynes asm fixtures/movingsprite/movingsprite.asm".split())
compiler.assert_called_once_with(
'fixtures/movingsprite/movingsprite.asm',
output=None, path=None)
@patch('pynes.compiler.compile_file')
def test_asm_with_output(self, compiler):
main("pynes asm fixtures/movingsprite/movingsprite.asm --output"
" /tmp/movingsprite.nes".split())
compiler.assert_called_once_with(
'fixtures/movingsprite/movingsprite.asm',
output='/tmp/movingsprite.nes', path=None)
@patch('pynes.compiler.compile_file')
def test_asm_with_path(self, compiler):
main("pynes asm fixtures/movingsprite/movingsprite.asm --path "
"fixtures/movingsprite".split())
compiler.assert_called_once_with(
'fixtures/movingsprite/movingsprite.asm',
output=None, path='fixtures/movingsprite')
@patch('pynes.composer.compose_file')
def test_py(self, composer):
main("pynes py pynes/examples/movingsprite.py".split())
composer.assert_called_once_with(
'pynes/examples/movingsprite.py',
output=None, asm=False, path=None)
@patch('pynes.composer.compose_file')
def test_py_with_asm(self, composer):
main("pynes py pynes/examples/movingsprite.py --asm".split())
composer.assert_called_once_with(
'pynes/examples/movingsprite.py',
output=None, asm=True, path=None)
@patch('pynes.composer.compose_file')
def test_py_with_output(self, composer):
main("pynes py pynes/examples/movingsprite.py --output "
"output.nes".split())
composer.assert_called_once_with(
'pynes/examples/movingsprite.py',
output='output.nes', asm=False, path=None)
@patch('pynes.composer.compose_file')
def test_py_with_path(self, composer):
main("pynes py pynes/examples/movingsprite.py --path "
"fixtures/movingsprite".split())
composer.assert_called_once_with(
'pynes/examples/movingsprite.py',
output=None, path='fixtures/movingsprite', asm=False)
def test_py_real_build_movingsprite(self):
args = (
"pynes py pynes/examples/movingsprite.py "
"--path fixtures/movingsprite "
"--output pynes/examples/movingsprite.nes"
).split()
main(args)
def test_py_real_build_mario(self):
args = (
"pynes py pynes/examples/mario.py "
"--path fixtures/nerdynights/scrolling "
"--output pynes/examples/mario.nes"
).split()
main(args)
def test_py_real_build_helloworld(self):
args = (
"pynes py pynes/examples/helloworld.py "
"--path fixtures/nerdynights/scrolling "
"--output pynes/examples/helloworld.nes"
).split()
main(args)
def test_py_real_build_slides(self):
args = (
"pynes py pynes/examples/slides.py "
"--path fixtures/nerdynights/scrolling "
"--output pynes/examples/slides.nes --asm"
).split()
main(args)
| bsd-3-clause |
kaos/ecos | packages/hal/arm/gps4020/current/support/download/download.py | 11 | 3416 | #! /usr/bin/env python
#
# Copyright (C) 2003, MLB Associates
#
# This program is used to read a Motorola S-record file and
# download it using the GDB protocol.
import os, string, sys, time
trace = open("/tmp/download.trace", "w")
#
# Use up some time
#
def spin():
j = 0
for i in range(0,200):
j = j + 1
#
# Compute the checksum for a string
#
def cksum(str):
# sys.stderr.write("cksum %s\n" % str)
sum = 0
cs = str[1:]
while cs:
sum = sum + ord(cs[:1])
cs = cs[1:]
return sum & 0xFF
#
# Send a string via the GDB protocol. Note: this routine
# computes and adds the checksum before starting.
#
def send(str):
str = str + "#%02x" % cksum(str)
# trace.write("ready to send: %s\n" % str)
# trace.flush()
while 1:
s = str
while s:
os.write(1, s[:1])
spin()
# time.sleep(0.001)
s = s[1:]
c = os.read(0, 1)
if c <> '+':
trace.write("~ACK: %c\n" % c)
trace.write("sent: %s\n" % str)
trace.flush()
continue
res = ''
while 1:
c = os.read(0, 1)
if c == '#': break
res = res + c
# trace.write("ACK: %c, res: %s\n" % (c, res))
# trace.flush()
# trace.write("res = %s\n" % res)
# trace.flush()
csum = cksum(res)
cs = os.read(0, 1)
cs = cs + os.read(0, 1)
sum = string.atoi(cs, 16)
if csum <> sum:
os.write(1, '-')
trace.write("RES = %s, sum: %x/%x\n" % (res, csum, sum))
trace.write("sent: %s\n" % str)
trace.flush()
continue
os.write(1, '+')
trace.flush()
return
#
# Process a stream of S-records, supplied by 'readline()'
#
def download(readline):
# send("$Hc-1")
# send("$Hg0")
last_addr = 0
while 1:
line = readline()
if not line: break
if line[0] <> 'S':
raise ("Invalid input:" + line)
if line[1] in "123":
len = string.atoi(line[2:4],16)
an = ord(line[1]) - ord('1') + 2
ae = 4 + (an*2)
addr = string.atoi(line[4:ae],16)
#print "len = %d, addr = 0x%x " % (len, addr)
len = len - (an+1)
line = line[ae:]
out = "$M%x,%x:" % (addr, len)
for i in range(0,len):
val = string.atoi(line[:2],16)
#print "val = 0x%x" % val
line = line[2:]
out = out + "%02x" % val
if (addr - last_addr) >= 0x400:
last_addr = addr
sys.stderr.write("0x%x\n" % addr)
send(out)
elif line[1] in "789":
len = string.atoi(line[2:4],16)
eos = 10
if line[1] == '7':
eos = 12
addr = string.atoi(line[4:12],16)
#print "len = %d, addr = 0x%x " % (len, addr)
len = len - 4
line = line[eos:]
out = "$P40=%08x" % addr
sys.stderr.write("Set PC = 0x%x\n" % addr)
send(out)
# This command starts the program
send("$c#63")
if __name__ == '__main__': # testing
import sys
if len(sys.argv) > 1: download(open(sys.argv[1]).readline)
else: download(sys.stdin.readline)
| gpl-2.0 |
haoxli/crosswalk-test-suite | tools/xml/csv2xml.py | 18 | 10254 | #!/usr/bin/python
# encoding:utf-8
# Copyright (c) 2014 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Liu, xin <xinx.liu@intel.com>
# Li, Hao <haox.li@intel.com>
import os
import csv
import re
import sys
import platform
import logging
import logging.handlers
from xml.etree import ElementTree
LOG = None
LOG_LEVEL = logging.DEBUG
class Set():
set_name = ""
set_type = ""
ui_auto = ""
testcase = []
    def __init__(self, setname, settype, uiauto, testcase=None):
        self.set_name = setname
        self.set_type = settype
        self.ui_auto = uiauto
        # use a fresh list per instance to avoid a shared mutable default
        self.testcase = testcase if testcase is not None else []
class TestCase():
case_id = ""
purpose = ""
component = ""
priority = ""
execution_type = ""
status = ""
case_type = ""
onload_delay = ""
subcase = ""
pre_condition = ""
post_condition = ""
steps = []
test_script_entry = ""
refer_test_script_entry = ""
bdd_test_script_entry = ""
spec_category = ""
spec_section = ""
spec_specification = ""
spec_interface = ""
spec_element_name = ""
spec_element_type = ""
spec_url = ""
spec_statement = ""
def __init__(self, caseid, purpose, component, priority, executiontype, status, casetype,\
onloaddelay, subcase, precondition, postcondition, steps, testscriptentry,\
refertestscriptentry, bddtestscriptentry, speccategory, specsection,\
specification, specinterface, specelementname, specelementtype, specurl):
self.case_id = caseid
self.purpose = purpose
self.component = component
self.priority = priority
self.execution_type = executiontype
self.status = status
self.case_type = casetype
self.onload_delay = onloaddelay
self.subcase = subcase
self.pre_condition = precondition
self.post_condition = postcondition
self.steps = steps
self.test_script_entry = testscriptentry
self.refer_test_script_entry = refertestscriptentry
self.bdd_test_script_entry = bddtestscriptentry
self.spec_category = speccategory
self.spec_section = specsection
self.spec_specification = specification
self.spec_interface = specinterface
self.spec_element_name = specelementname
self.spec_element_type = specelementtype
self.spec_url = specurl
class ColorFormatter(logging.Formatter):
def __init__(self, msg):
logging.Formatter.__init__(self, msg)
def format(self, record):
red, green, yellow, blue = range(4)
colors = {'INFO': green, 'DEBUG': blue,
'WARNING': yellow, 'ERROR': red}
msg = record.msg
if msg[0] == "+":
msg = "\33[01m" + msg[1:] + "\033[0m"
elif msg[0] == "=":
msg = "\33[07m" + msg + "\033[0m"
levelname = record.levelname
if levelname in colors:
msg_color = "\033[0;%dm" % (
31 + colors[levelname]) + msg + "\033[0m"
record.msg = msg_color
return logging.Formatter.format(self, record)
def csv2full(csv_path, split_sign):
if not os.path.isfile(csv_path):
print '%s is not a file' % csv_path
return
name, ext = os.path.splitext(csv_path)
if not ext == '.csv':
print '%s is not a csv' % csv_path
return
LOG.info("+Convert csv to test.full.xml start ...")
csv_file = file(csv_path, 'rb')
csv_file.readline()
reader = csv.reader(csv_file)
test_suite = {}
for line in reader:
if test_suite.get(line[0]) is None:
testset = Set(line[0], line[1], line[2], [])
test_suite[line[0]] = testset
testcase = TestCase(line[3], line[4], line[5], line[8], line[7], line[17], line[22],\
str(line[6]), str(line[21]), line[23], line[24], line[25], line[18], line[19],\
line[20], line[14], line[13], line[12], line[11], line[10], line[9], line[15])
test_suite[line[0]].testcase.append(testcase)
csv_file.close()
suite_name = test_suite.values()[0].testcase[0].test_script_entry.split('/')[2]
category_name = test_suite.values()[0].testcase[0].component.split('/')[0]
folder = os.path.dirname(csv_path)
full_test_path = '%s%s%s-tests.full.xml' % (folder, split_sign, suite_name)
make_full_test(
test_suite,
full_test_path,
suite_name,
category_name)
LOG.info('General %s' % full_test_path)
def make_full_test(test_suite, full_test_name, suite_name, category_name):
full_test_file = open(full_test_name, 'w')
content = '<?xml version="1.0" encoding="UTF-8"?>\n'\
+ '<?xml-stylesheet type="text/xsl" href="./testcase.xsl"?>\n'\
+ '<test_definition>\n'\
+ ' <suite category="%s" name="%s">\n' % (category_name, suite_name)
for testset in test_suite.values():
set_ui_auto = ""
if testset.ui_auto is not "":
set_ui_auto = ' ui-auto="%s"' % testset.ui_auto
content += ' <set name="%s" type="%s"%s>\n' % (testset.set_name, testset.set_type, set_ui_auto)
testcasestr = ""
for testcase in testset.testcase:
onload_delay = ' onload_delay="%s"' % testcase.onload_delay if testcase.onload_delay is not "" else ""
subcase = ' subcase="%s"' % testcase.subcase if testcase.subcase is not "" else ""
pre_condition = '\
<pre_condition>\n\
%s\n\
</pre_condition>\n' % testcase.pre_condition if testcase.pre_condition is not "" else ""
post_condition = '\
<post_condition>\n\
%s\n\
</post_condition>\n' % testcase.post_condition if testcase.post_condition is not "" else ""
            refer_test_script_entry = "            <refer_test_script_entry>%s</refer_test_script_entry>\n" \
                % testcase.refer_test_script_entry if testcase.refer_test_script_entry != "" else ""
            bdd_test_script_entry = "            <bdd_test_script_entry>%s</bdd_test_script_entry>\n" \
                % testcase.bdd_test_script_entry if testcase.bdd_test_script_entry != "" else ""
testcasestr += '\
<testcase purpose="%s" component="%s" type="%s" status="%s" execution_type="%s" priority="%s" id="%s"%s%s>\n\
<description>\n%s%s\
<test_script_entry>%s</test_script_entry>\n%s%s\
</description>\n\
<specs>\n\
<spec>\n\
<spec_assertion element_type="%s" element_name="%s" interface="%s" specification="%s" section="%s" category="%s"/>\n\
<spec_url>%s</spec_url>\n\
<spec_statement/>\n\
</spec>\n\
</specs>\n\
</testcase>\n' % (testcase.purpose, testcase.component, testcase.case_type, testcase.status, testcase.execution_type,\
testcase.priority, testcase.case_id, onload_delay, subcase, pre_condition, post_condition,\
testcase.test_script_entry, refer_test_script_entry, bdd_test_script_entry, testcase.spec_element_type,\
testcase.spec_element_name, testcase.spec_interface, testcase.spec_specification, testcase.spec_section,\
testcase.spec_category, testcase.spec_url)
content += testcasestr\
+ ' </set>\n'
content += ' </suite>\n</test_definition>'
full_test_file.seek(0)
full_test_file.truncate()
full_test_file.write(content)
full_test_file.close()
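# Rough shape of the XML emitted by make_full_test() (attribute values are
# illustrative):
#
#   <test_definition>
#     <suite category="..." name="...">
#       <set name="..." type="..." ui-auto="...">
#         <testcase purpose="..." id="..." ...>
#           <description>...</description>
#           <specs>...</specs>
#         </testcase>
#       </set>
#     </suite>
#   </test_definition>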
def echo_about():
"""
This function will print the user guide and stop toolkit.
"""
about = 'csv2xml V1.0\n-c <path> | Convert csv file to tests.full.xml and tests.xml\n'
print about
sys.exit()
def main():
"""
main function will call different functions according to the command line argvs followed the toolkit.
"""
global LOG
LOG = logging.getLogger("pack-tool")
LOG.setLevel(LOG_LEVEL)
stream_handler = logging.StreamHandler()
stream_handler.setLevel(LOG_LEVEL)
stream_formatter = ColorFormatter("[%(asctime)s] %(message)s")
stream_handler.setFormatter(stream_formatter)
LOG.addHandler(stream_handler)
sys_name = platform.system()
    if sys_name == 'Windows':
        split_sign = '\\'
    else:
        # treat Linux, OS X and anything else as POSIX
        split_sign = '/'
if len(sys.argv) != 3:
        print 'Error: Not enough arguments!'
echo_about()
else:
{'-c': lambda: csv2full(sys.argv[2], split_sign)}[sys.argv[1]]()
if __name__ == '__main__':
main()
| bsd-3-clause |
manojhirway/ExistingImagesOnNFS | cinder/exception.py | 8 | 27813 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Cinder base exception handling.
Includes decorator for re-raising Cinder-type exceptions.
SHOULD include dedicated exception logging.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_versionedobjects import exception as obj_exc
import six
import webob.exc
from cinder.i18n import _, _LE
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal.'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts)
class ConvertedException(webob.exc.WSGIHTTPException):
def __init__(self, code=0, title="", explanation=""):
self.code = code
self.title = title
self.explanation = explanation
super(ConvertedException, self).__init__()
class Error(Exception):
pass
class CinderException(Exception):
"""Base Cinder Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
self.kwargs['message'] = message
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
for k, v in self.kwargs.items():
if isinstance(v, Exception):
self.kwargs[k] = six.text_type(v)
if self._should_format():
try:
message = self.message % kwargs
except Exception:
exc_info = sys.exc_info()
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.items():
LOG.error(_LE("%(name)s: %(value)s"),
{'name': name, 'value': value})
if CONF.fatal_exception_format_errors:
six.reraise(*exc_info)
# at least get the core message out if something happened
message = self.message
elif isinstance(message, Exception):
message = six.text_type(message)
# NOTE(luisg): We put the actual message in 'msg' so that we can access
# it, because if we try to access the message via 'message' it will be
# overshadowed by the class' message attribute
self.msg = message
super(CinderException, self).__init__(message)
def _should_format(self):
return self.kwargs['message'] is None or '%(message)' in self.message
def __unicode__(self):
return six.text_type(self.msg)
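# Illustrative usage (not part of the original module): subclasses only
# declare a printf-style ``message`` template and keyword arguments fill
# it in, e.g.
#
#   raise VolumeNotFound(volume_id='vol-42')
#   # -> "Volume vol-42 could not be found."
#
# Passing an explicit message string skips the template formatting.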
class VolumeBackendAPIException(CinderException):
message = _("Bad or unexpected response from the storage volume "
"backend API: %(data)s")
class VolumeDriverException(CinderException):
message = _("Volume driver reported an error: %(message)s")
class BackupDriverException(CinderException):
message = _("Backup driver reported an error: %(message)s")
class GlanceConnectionFailed(CinderException):
message = _("Connection to glance failed: %(reason)s")
class NotAuthorized(CinderException):
message = _("Not authorized.")
code = 403
class AdminRequired(NotAuthorized):
message = _("User does not have admin privileges")
class PolicyNotAuthorized(NotAuthorized):
message = _("Policy doesn't allow %(action)s to be performed.")
class ImageNotAuthorized(CinderException):
message = _("Not authorized for image %(image_id)s.")
class DriverNotInitialized(CinderException):
message = _("Volume driver not ready.")
class Invalid(CinderException):
message = _("Unacceptable parameters.")
code = 400
class InvalidSnapshot(Invalid):
message = _("Invalid snapshot: %(reason)s")
class InvalidVolumeAttachMode(Invalid):
message = _("Invalid attaching mode '%(mode)s' for "
"volume %(volume_id)s.")
class VolumeAttached(Invalid):
message = _("Volume %(volume_id)s is still attached, detach volume first.")
class InvalidResults(Invalid):
message = _("The results are invalid.")
class InvalidInput(Invalid):
message = _("Invalid input received: %(reason)s")
class InvalidVolumeType(Invalid):
message = _("Invalid volume type: %(reason)s")
class InvalidVolume(Invalid):
message = _("Invalid volume: %(reason)s")
class InvalidContentType(Invalid):
message = _("Invalid content type %(content_type)s.")
class InvalidHost(Invalid):
message = _("Invalid host: %(reason)s")
# Cannot be templated as the error syntax varies.
# msg needs to be constructed when raised.
class InvalidParameterValue(Invalid):
message = _("%(err)s")
class InvalidAuthKey(Invalid):
message = _("Invalid auth key: %(reason)s")
class InvalidConfigurationValue(Invalid):
message = _('Value "%(value)s" is not valid for '
'configuration option "%(option)s"')
class ServiceUnavailable(Invalid):
message = _("Service is unavailable at this time.")
class ImageUnacceptable(Invalid):
message = _("Image %(image_id)s is unacceptable: %(reason)s")
class DeviceUnavailable(Invalid):
message = _("The device in the path %(path)s is unavailable: %(reason)s")
class InvalidUUID(Invalid):
message = _("Expected a uuid but received %(uuid)s.")
class APIException(CinderException):
message = _("Error while requesting %(service)s API.")
def __init__(self, message=None, **kwargs):
if 'service' not in kwargs:
kwargs['service'] = 'unknown'
super(APIException, self).__init__(message, **kwargs)
class APITimeout(APIException):
message = _("Timeout while requesting %(service)s API.")
class NotFound(CinderException):
message = _("Resource could not be found.")
code = 404
safe = True
class VolumeNotFound(NotFound):
message = _("Volume %(volume_id)s could not be found.")
class VolumeAttachmentNotFound(NotFound):
message = _("Volume attachment could not be found with "
"filter: %(filter)s .")
class VolumeMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeAdminMetadataNotFound(NotFound):
message = _("Volume %(volume_id)s has no administration metadata with "
"key %(metadata_key)s.")
class InvalidVolumeMetadata(Invalid):
message = _("Invalid metadata: %(reason)s")
class InvalidVolumeMetadataSize(Invalid):
message = _("Invalid metadata size: %(reason)s")
class SnapshotMetadataNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s has no metadata with "
"key %(metadata_key)s.")
class VolumeTypeNotFound(NotFound):
message = _("Volume type %(volume_type_id)s could not be found.")
class VolumeTypeNotFoundByName(VolumeTypeNotFound):
message = _("Volume type with name %(volume_type_name)s "
"could not be found.")
class VolumeTypeAccessNotFound(NotFound):
message = _("Volume type access not found for %(volume_type_id)s / "
"%(project_id)s combination.")
class VolumeTypeExtraSpecsNotFound(NotFound):
message = _("Volume Type %(volume_type_id)s has no extra specs with "
"key %(extra_specs_key)s.")
class VolumeTypeInUse(CinderException):
message = _("Volume Type %(volume_type_id)s deletion is not allowed with "
"volumes present with the type.")
class SnapshotNotFound(NotFound):
message = _("Snapshot %(snapshot_id)s could not be found.")
class ServerNotFound(NotFound):
message = _("Instance %(uuid)s could not be found.")
class VolumeIsBusy(CinderException):
message = _("deleting volume %(volume_name)s that has snapshot")
class SnapshotIsBusy(CinderException):
message = _("deleting snapshot %(snapshot_name)s that has "
"dependent volumes")
class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.")
class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.")
class ImageNotFound(NotFound):
message = _("Image %(image_id)s could not be found.")
class ServiceNotFound(NotFound):
message = _("Service %(service_id)s could not be found.")
class HostNotFound(NotFound):
message = _("Host %(host)s could not be found.")
class SchedulerHostFilterNotFound(NotFound):
message = _("Scheduler Host Filter %(filter_name)s could not be found.")
class SchedulerHostWeigherNotFound(NotFound):
message = _("Scheduler Host Weigher %(weigher_name)s could not be found.")
class HostBinaryNotFound(NotFound):
message = _("Could not find binary %(binary)s on host %(host)s.")
class InvalidReservationExpiration(Invalid):
message = _("Invalid reservation expiration %(expire)s.")
class InvalidQuotaValue(Invalid):
message = _("Change would make usage less than 0 for the following "
"resources: %(unders)s")
class QuotaNotFound(NotFound):
message = _("Quota could not be found")
class QuotaResourceUnknown(QuotaNotFound):
message = _("Unknown quota resources %(unknown)s.")
class ProjectQuotaNotFound(QuotaNotFound):
message = _("Quota for project %(project_id)s could not be found.")
class QuotaClassNotFound(QuotaNotFound):
message = _("Quota class %(class_name)s could not be found.")
class QuotaUsageNotFound(QuotaNotFound):
message = _("Quota usage for project %(project_id)s could not be found.")
class ReservationNotFound(QuotaNotFound):
message = _("Quota reservation %(uuid)s could not be found.")
class OverQuota(CinderException):
message = _("Quota exceeded for resources: %(overs)s")
class FileNotFound(NotFound):
message = _("File %(file_path)s could not be found.")
class Duplicate(CinderException):
pass
class VolumeTypeExists(Duplicate):
message = _("Volume Type %(id)s already exists.")
class VolumeTypeAccessExists(Duplicate):
message = _("Volume type access for %(volume_type_id)s / "
"%(project_id)s combination already exists.")
class VolumeTypeEncryptionExists(Invalid):
message = _("Volume type encryption for type %(type_id)s already exists.")
class VolumeTypeEncryptionNotFound(NotFound):
message = _("Volume type encryption for type %(type_id)s does not exist.")
class MalformedRequestBody(CinderException):
message = _("Malformed message body: %(reason)s")
class ConfigNotFound(NotFound):
message = _("Could not find config at %(path)s")
class ParameterNotFound(NotFound):
message = _("Could not find parameter %(param)s")
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
class NoValidHost(CinderException):
message = _("No valid host was found. %(reason)s")
class NoMoreTargets(CinderException):
"""No more available targets."""
pass
class QuotaError(CinderException):
message = _("Quota exceeded: code=%(code)s")
code = 413
headers = {'Retry-After': 0}
safe = True
class VolumeSizeExceedsAvailableQuota(QuotaError):
message = _("Requested volume or snapshot exceeds allowed %(name)s "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
def __init__(self, message=None, **kwargs):
kwargs.setdefault('name', 'gigabytes')
super(VolumeSizeExceedsAvailableQuota, self).__init__(
message, **kwargs)
class VolumeSizeExceedsLimit(QuotaError):
message = _("Requested volume size %(size)d is larger than "
"maximum allowed limit %(limit)d.")
class VolumeBackupSizeExceedsAvailableQuota(QuotaError):
message = _("Requested backup exceeds allowed Backup gigabytes "
"quota. Requested %(requested)sG, quota is %(quota)sG and "
"%(consumed)sG has been consumed.")
class VolumeLimitExceeded(QuotaError):
message = _("Maximum number of volumes allowed (%(allowed)d) exceeded for "
"quota '%(name)s'.")
def __init__(self, message=None, **kwargs):
kwargs.setdefault('name', 'volumes')
super(VolumeLimitExceeded, self).__init__(message, **kwargs)
class SnapshotLimitExceeded(QuotaError):
message = _("Maximum number of snapshots allowed (%(allowed)d) exceeded")
class BackupLimitExceeded(QuotaError):
message = _("Maximum number of backups allowed (%(allowed)d) exceeded")
class DuplicateSfVolumeNames(Duplicate):
message = _("Detected more than one volume with name %(vol_name)s")
class VolumeTypeCreateFailed(CinderException):
message = _("Cannot create volume_type with "
"name %(name)s and specs %(extra_specs)s")
class VolumeTypeUpdateFailed(CinderException):
message = _("Cannot update volume_type %(id)s")
class UnknownCmd(VolumeDriverException):
message = _("Unknown or unsupported command %(cmd)s")
class MalformedResponse(VolumeDriverException):
message = _("Malformed response to command %(cmd)s: %(reason)s")
class FailedCmdWithDump(VolumeDriverException):
message = _("Operation failed with status=%(status)s. Full dump: %(data)s")
class InvalidConnectorException(VolumeDriverException):
message = _("Connector doesn't have required information: %(missing)s")
class GlanceMetadataExists(Invalid):
message = _("Glance metadata cannot be updated, key %(key)s"
" exists for volume id %(volume_id)s")
class GlanceMetadataNotFound(NotFound):
message = _("Glance metadata for volume/snapshot %(id)s cannot be found.")
class ExportFailure(Invalid):
message = _("Failed to export for volume: %(reason)s")
class RemoveExportException(VolumeDriverException):
message = _("Failed to remove export for volume %(volume)s: %(reason)s")
class MetadataCreateFailure(Invalid):
message = _("Failed to create metadata for volume: %(reason)s")
class MetadataUpdateFailure(Invalid):
message = _("Failed to update metadata for volume: %(reason)s")
class MetadataCopyFailure(Invalid):
message = _("Failed to copy metadata to volume: %(reason)s")
class InvalidMetadataType(Invalid):
message = _("The type of metadata: %(metadata_type)s for volume/snapshot "
"%(id)s is invalid.")
class ImageCopyFailure(Invalid):
message = _("Failed to copy image to volume: %(reason)s")
class BackupInvalidCephArgs(BackupDriverException):
message = _("Invalid Ceph args provided for backup rbd operation")
class BackupOperationError(Invalid):
message = _("An error has occurred during backup operation")
class BackupMetadataUnsupportedVersion(BackupDriverException):
message = _("Unsupported backup metadata version requested")
class BackupVerifyUnsupportedDriver(BackupDriverException):
message = _("Unsupported backup verify driver")
class VolumeMetadataBackupExists(BackupDriverException):
message = _("Metadata backup already exists for this volume")
class BackupRBDOperationFailed(BackupDriverException):
message = _("Backup RBD operation failed")
class EncryptedBackupOperationFailed(BackupDriverException):
message = _("Backup operation of an encrypted volume failed.")
class BackupNotFound(NotFound):
message = _("Backup %(backup_id)s could not be found.")
class BackupFailedToGetVolumeBackend(NotFound):
message = _("Failed to identify volume backend.")
class InvalidBackup(Invalid):
message = _("Invalid backup: %(reason)s")
class SwiftConnectionFailed(BackupDriverException):
message = _("Connection to swift failed: %(reason)s")
class TransferNotFound(NotFound):
message = _("Transfer %(transfer_id)s could not be found.")
class VolumeMigrationFailed(CinderException):
message = _("Volume migration failed: %(reason)s")
class SSHInjectionThreat(CinderException):
message = _("SSH command injection detected: %(command)s")
class QoSSpecsExists(Duplicate):
message = _("QoS Specs %(specs_id)s already exists.")
class QoSSpecsCreateFailed(CinderException):
message = _("Failed to create qos_specs: "
"%(name)s with specs %(qos_specs)s.")
class QoSSpecsUpdateFailed(CinderException):
message = _("Failed to update qos_specs: "
"%(specs_id)s with specs %(qos_specs)s.")
class QoSSpecsNotFound(NotFound):
message = _("No such QoS spec %(specs_id)s.")
class QoSSpecsAssociateFailed(CinderException):
message = _("Failed to associate qos_specs: "
"%(specs_id)s with type %(type_id)s.")
class QoSSpecsDisassociateFailed(CinderException):
message = _("Failed to disassociate qos_specs: "
"%(specs_id)s with type %(type_id)s.")
class QoSSpecsKeyNotFound(NotFound):
message = _("QoS spec %(specs_id)s has no spec with "
"key %(specs_key)s.")
class InvalidQoSSpecs(Invalid):
message = _("Invalid qos specs: %(reason)s")
class QoSSpecsInUse(CinderException):
message = _("QoS Specs %(specs_id)s is still associated with entities.")
class KeyManagerError(CinderException):
message = _("key manager error: %(reason)s")
class ManageExistingInvalidReference(CinderException):
message = _("Manage existing volume failed due to invalid backend "
"reference %(existing_ref)s: %(reason)s")
class ManageExistingAlreadyManaged(CinderException):
message = _("Unable to manage existing volume. "
"Volume %(volume_ref)s already managed.")
class ReplicationError(CinderException):
message = _("Volume %(volume_id)s replication "
"error: %(reason)s")
class ReplicationNotFound(NotFound):
message = _("Volume replication for %(volume_id)s "
"could not be found.")
class ManageExistingVolumeTypeMismatch(CinderException):
message = _("Manage existing volume failed due to volume type mismatch: "
"%(reason)s")
class ExtendVolumeError(CinderException):
message = _("Error extending volume: %(reason)s")
class EvaluatorParseException(Exception):
message = _("Error during evaluator parsing: %(reason)s")
UnsupportedObjectError = obj_exc.UnsupportedObjectError
OrphanedObjectError = obj_exc.OrphanedObjectError
IncompatibleObjectVersion = obj_exc.IncompatibleObjectVersion
ReadOnlyFieldError = obj_exc.ReadOnlyFieldError
ObjectActionError = obj_exc.ObjectActionError
ObjectFieldInvalid = obj_exc.ObjectFieldInvalid
class VolumeGroupNotFound(CinderException):
message = _('Unable to find Volume Group: %(vg_name)s')
class VolumeGroupCreationFailed(CinderException):
message = _('Failed to create Volume Group: %(vg_name)s')
class VolumeDeviceNotFound(CinderException):
message = _('Volume device not found at %(device)s.')
# Driver specific exceptions
# Pure Storage
class PureDriverException(VolumeDriverException):
message = _("Pure Storage Cinder driver failure: %(reason)s")
# SolidFire
class SolidFireAPIException(VolumeBackendAPIException):
message = _("Bad response from SolidFire API")
class SolidFireDriverException(VolumeDriverException):
message = _("SolidFire Cinder Driver exception")
class SolidFireAPIDataException(SolidFireAPIException):
message = _("Error in SolidFire API response: data=%(data)s")
class SolidFireAccountNotFound(SolidFireDriverException):
message = _("Unable to locate account %(account_name)s on "
"Solidfire device")
class SolidFireRetryableException(VolumeBackendAPIException):
message = _("Retryable SolidFire Exception encountered")
# HP 3Par
class Invalid3PARDomain(VolumeDriverException):
message = _("Invalid 3PAR Domain: %(err)s")
# RemoteFS drivers
class RemoteFSException(VolumeDriverException):
message = _("Unknown RemoteFS exception")
class RemoteFSNoSharesMounted(RemoteFSException):
message = _("No mounted shares found")
class RemoteFSNoSuitableShareFound(RemoteFSException):
message = _("There is no share which can host %(volume_size)sG")
# NFS driver
class NfsException(RemoteFSException):
message = _("Unknown NFS exception")
class NfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted NFS shares found")
class NfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# Smbfs driver
class SmbfsException(RemoteFSException):
message = _("Unknown SMBFS exception.")
class SmbfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted SMBFS shares found.")
class SmbfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG.")
# Gluster driver
class GlusterfsException(RemoteFSException):
message = _("Unknown Gluster exception")
class GlusterfsNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted Gluster shares found")
class GlusterfsNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# Virtuozzo Storage Driver
class VzStorageException(RemoteFSException):
message = _("Unknown Virtuozzo Storage exception")
class VzStorageNoSharesMounted(RemoteFSNoSharesMounted):
message = _("No mounted Virtuozzo Storage shares found")
class VzStorageNoSuitableShareFound(RemoteFSNoSuitableShareFound):
message = _("There is no share which can host %(volume_size)sG")
# Fibre Channel Zone Manager
class ZoneManagerException(CinderException):
message = _("Fibre Channel connection control failure: %(reason)s")
class FCZoneDriverException(CinderException):
message = _("Fibre Channel Zone operation failed: %(reason)s")
class FCSanLookupServiceException(CinderException):
message = _("Fibre Channel SAN Lookup failure: %(reason)s")
class BrocadeZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
class CiscoZoningCliException(CinderException):
message = _("Fibre Channel Zoning CLI error: %(reason)s")
class NetAppDriverException(VolumeDriverException):
message = _("NetApp Cinder Driver exception.")
class EMCVnxCLICmdError(VolumeBackendAPIException):
message = _("EMC VNX Cinder Driver CLI exception: %(cmd)s "
"(Return Code: %(rc)s) (Output: %(out)s).")
# ConsistencyGroup
class ConsistencyGroupNotFound(NotFound):
message = _("ConsistencyGroup %(consistencygroup_id)s could not be found.")
class InvalidConsistencyGroup(Invalid):
message = _("Invalid ConsistencyGroup: %(reason)s")
# CgSnapshot
class CgSnapshotNotFound(NotFound):
message = _("CgSnapshot %(cgsnapshot_id)s could not be found.")
class InvalidCgSnapshot(Invalid):
message = _("Invalid CgSnapshot: %(reason)s")
# Hitachi Block Storage Driver
class HBSDError(CinderException):
message = _("HBSD error occurs.")
class HBSDCmdError(HBSDError):
def __init__(self, message=None, ret=None, err=None):
self.ret = ret
self.stderr = err
super(HBSDCmdError, self).__init__(message=message)
class HBSDBusy(HBSDError):
message = "Device or resource is busy."
class HBSDNotFound(NotFound):
message = _("Storage resource could not be found.")
class HBSDVolumeIsBusy(VolumeIsBusy):
message = _("Volume %(volume_name)s is busy.")
# Datera driver
class DateraAPIException(VolumeBackendAPIException):
message = _("Bad response from Datera API")
# Target drivers
class ISCSITargetCreateFailed(CinderException):
message = _("Failed to create iscsi target for volume %(volume_id)s.")
class ISCSITargetRemoveFailed(CinderException):
message = _("Failed to remove iscsi target for volume %(volume_id)s.")
class ISCSITargetAttachFailed(CinderException):
message = _("Failed to attach iSCSI target for volume %(volume_id)s.")
class ISCSITargetDetachFailed(CinderException):
message = _("Failed to detach iSCSI target for volume %(volume_id)s.")
class ISCSITargetHelperCommandFailed(CinderException):
message = _("%(error_message)s")
# X-IO driver exception.
class XIODriverException(VolumeDriverException):
message = _("X-IO Volume Driver exception!")
# Violin Memory drivers
class ViolinInvalidBackendConfig(CinderException):
message = _("Volume backend config is invalid: %(reason)s")
class ViolinRequestRetryTimeout(CinderException):
message = _("Backend service retry timeout hit: %(timeout)s sec")
class ViolinBackendErr(CinderException):
message = _("Backend reports: %(message)s")
class ViolinBackendErrExists(CinderException):
message = _("Backend reports: item already exists")
class ViolinBackendErrNotFound(CinderException):
message = _("Backend reports: item not found")
# ZFSSA NFS driver exception.
class WebDAVClientError(CinderException):
message = _("The WebDAV request failed. Reason: %(msg)s, "
"Return code/reason: %(code)s, Source Volume: %(src)s, "
"Destination Volume: %(dst)s, Method: %(method)s.")
# XtremIO Drivers
class XtremIOAlreadyMappedError(CinderException):
message = _("Volume to Initiator Group mapping already exists")
class XtremIOArrayBusy(CinderException):
message = _("System is busy, retry operation.")
# Infortrend EonStor DS Driver
class InfortrendCliException(CinderException):
message = _("Infortrend CLI exception: %(err)s Param: %(param)s "
"(Return Code: %(rc)s) (Output: %(out)s)")
# DOTHILL drivers
class DotHillInvalidBackend(CinderException):
message = _("Backend doesn't exist (%(backend)s)")
class DotHillConnectionError(CinderException):
message = _("%(message)s")
class DotHillAuthenticationError(CinderException):
message = _("%(message)s")
class DotHillNotEnoughSpace(CinderException):
message = _("Not enough space on backend (%(backend)s)")
class DotHillRequestError(CinderException):
message = _("%(message)s")
class DotHillNotTargetPortal(CinderException):
message = _("No active iSCSI portals with supplied iSCSI IPs")
# Sheepdog
class SheepdogError(VolumeBackendAPIException):
message = _("An error has occured in SheepdogDriver. (Reason: %(reason)s)")
class SheepdogCmdError(SheepdogError):
message = _("(Command: %(cmd)s) "
"(Return Code: %(exit_code)s) "
"(Stdout: %(stdout)s) "
"(Stderr: %(stderr)s)")
class MetadataAbsent(CinderException):
message = _("There is no metadata in DB object.")
class NotSupportedOperation(Invalid):
message = _("Operation not supported: %(operation)s.")
code = 405
# Hitachi HNAS drivers
class HNASConnError(CinderException):
message = _("%(message)s")
| apache-2.0 |
kirbyfan64/shedskin | examples/adatron.py | 6 | 6315 | #!/usr/bin/env python
# Adatron SVM with a Gaussian (RBF) kernel
# placed in the public domain by Stavros Korokithakis
import sys
from math import exp
CYTOSOLIC = 0
EXTRACELLULAR = 1
NUCLEAR = 2
MITOCHONDRIAL = 3
BLIND = 4
D = 5.0
LENGTH = 50
PROTEINS = []
AMINOACIDS = "ACDEFGHIKLMNPQRSTVWY"
class Protein:
def __init__(self, name, mass, isoelectric_point, size, sequence, type):
self.name = name
self.mass = mass
self.isoelectric_point = isoelectric_point
self.size = size
self.sequence = sequence
self.type = type
self.extract_composition()
def extract_composition(self):
self.local_composition = dict(((x, 0.0) for x in AMINOACIDS))
for counter in range(LENGTH):
self.local_composition[self.sequence[counter]] += 1.0 / LENGTH
self.global_composition = dict(((x, 0.0) for x in AMINOACIDS))
for aminoacid in self.sequence:
self.global_composition[aminoacid] += 1.0 / len(self.sequence)
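    # create_vector (below) concatenates the 20 local and the 20 global amino
    # acid frequencies into a single 40-dimensional feature vector per protein.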
def create_vector(self):
vector = []
for key, value in sorted(self.local_composition.items()):
vector.append(value)
        for key, value in sorted(self.global_composition.items()):
            vector.append(value)
return vector
def load_file(filename, type):
global PROTEINS
protfile = open(filename)
for line in protfile:
if line.startswith("name"):
continue
name, mass, isoelectric_point, size, sequence = line.strip().split("\t")
protein = Protein(name, mass, isoelectric_point, size, sequence, type)
PROTEINS.append(protein)
protfile.close()
def create_tables():
"""Create the feature and label tables."""
feature_table = []
label_table = []
for protein in PROTEINS:
feature_table.append(protein.create_vector())
for protein in PROTEINS:
if protein.type == BLIND:
continue
labels = [-1] * 4
# Invert the sign of the label our protein belongs to.
labels[protein.type] *= -1
label_table.append(labels)
return feature_table, label_table
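# Pairwise Gram matrix over all proteins using a Gaussian (RBF) kernel:
# K(x, y) = exp(-D * ||x - y||**2), with D = 5.0 (see the constants above).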
def create_kernel_table(feature_table):
kernel_table = []
for row in feature_table:
kernel_row = []
for candidate in feature_table:
difference = 0.0
for counter in range(len(row)):
difference += (row[counter] - candidate[counter]) ** 2
kernel_row.append(exp(-D*difference))
kernel_table.append(kernel_row)
return kernel_table
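# Rough sketch of the (kernel) Adatron loop below, trained one-vs-rest over
# the four localization classes: for every class and every sample, compute
# the margin g = 1 - y * (prediction + bias), move the sample's multiplier
# towards alpha + h * g clipped to [0, c], and stop once the largest
# multiplier change in every class falls below the tolerance.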
def train_adatron(kernel_table, label_table, h, c):
tolerance = 0.5
alphas = [([0.0] * len(kernel_table)) for _ in range(len(label_table[0]))]
betas = [([0.0] * len(kernel_table)) for _ in range(len(label_table[0]))]
bias = [0.0] * len(label_table[0])
labelalphas = [0.0] * len(kernel_table)
max_differences = [(0.0, 0)] * len(label_table[0])
for iteration in range(10*len(kernel_table)):
print "Starting iteration %s..." % iteration
if iteration == 20: # XXX shedskin test
return alphas, bias
for klass in range(len(label_table[0])):
max_differences[klass] = (0.0, 0)
for elem in range(len(kernel_table)):
labelalphas[elem] = label_table[elem][klass] * alphas[klass][elem]
for col_counter in range(len(kernel_table)):
prediction = 0.0
for row_counter in range(len(kernel_table)):
prediction += kernel_table[col_counter][row_counter] * \
labelalphas[row_counter]
g = 1.0 - ((prediction + bias[klass]) * label_table[col_counter][klass])
betas[klass][col_counter] = min(max((alphas[klass][col_counter] + h * g), 0.0), c)
difference = abs(alphas[klass][col_counter] - betas[klass][col_counter])
if difference > max_differences[klass][0]:
max_differences[klass] = (difference, col_counter)
if all([max_difference[0] < tolerance for max_difference in max_differences]):
return alphas, bias
else:
alphas[klass][max_differences[klass][1]] = betas[klass][max_differences[klass][1]]
element_sum = 0.0
for element_counter in range(len(kernel_table)):
element_sum += label_table[element_counter][klass] * alphas[klass][element_counter] / 4
bias[klass] = bias[klass] + element_sum
def calculate_error(alphas, bias, kernel_table, label_table):
    predictions = [([0.0] * len(kernel_table)) for _ in range(len(label_table[0]))]
    for klass in range(len(label_table[0])):
        for col_counter in range(len(kernel_table)):
            # reset the accumulated score per (class, sample) pair; without
            # this, scores from earlier samples leak into later predictions
            prediction = 0.0
            for row_counter in range(len(kernel_table)):
                prediction += kernel_table[col_counter][row_counter] * \
                              label_table[row_counter][klass] * alphas[klass][row_counter]
            predictions[klass][col_counter] = prediction + bias[klass]
for col_counter in range(len(kernel_table)):
current_predictions = []
error = 0
for row_counter in range(len(label_table[0])):
current_predictions.append(predictions[row_counter][col_counter])
predicted_class = current_predictions.index(max(current_predictions))
if label_table[col_counter][predicted_class] < 0:
error += 1
return 1.0 * error / len(kernel_table)
def main():
for filename, type in [("testdata/c.txt", CYTOSOLIC), ("testdata/e.txt", EXTRACELLULAR), ("testdata/n.txt", NUCLEAR), ("testdata/m.txt", MITOCHONDRIAL)]:#, ("b.txt", BLIND)]:
load_file(filename, type)
print "Creating feature tables..."
feature_table, label_table = create_tables()
#import pickle
#print "Loading kernel table..."
#kernel_file = file("kernel_table.txt")
#kernel_table = pickle.load(kernel_file)
#kernel_file.close()
print "Creating kernel table..."
kernel_table = create_kernel_table(feature_table)
print "Training SVM..."
alphas, bias = train_adatron(kernel_table, label_table, 1.0, 3.0)
print calculate_error(alphas, bias, kernel_table, label_table)
if __name__ == "__main__":
main()
| gpl-3.0 |
Java1Guy/ansible-modules-extras | packaging/os/pkgng.py | 60 | 11130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, bleader
# Written by bleader <bleader@ratonland.org>
# Based on pkgin module written by Shaun Zinck <shaun.zinck at gmail.com>
# that was based on pacman module written by Afterburn <http://github.com/afterburn>
# that was based on apt module written by Matthew Williams <matthew@flowroute.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: pkgng
short_description: Package manager for FreeBSD >= 9.0
description:
- Manage binary packages for FreeBSD using 'pkgng' which
is available in versions after 9.0.
version_added: "1.2"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
cached:
description:
- use local package base or try to fetch an updated one
choices: [ 'yes', 'no' ]
required: false
default: no
annotation:
description:
- a comma-separated list of keyvalue-pairs of the form
<+/-/:><key>[=<value>]. A '+' denotes adding an annotation, a
'-' denotes removing an annotation, and ':' denotes modifying an
annotation.
If setting or modifying annotations, a value must be provided.
required: false
version_added: "1.6"
pkgsite:
description:
- for pkgng versions before 1.1.4, specify packagesite to use
for downloading packages, if not specified, use settings from
/usr/local/etc/pkg.conf
for newer pkgng versions, specify a the name of a repository
configured in /usr/local/etc/pkg/repos
required: false
rootdir:
description:
- for pkgng versions 1.5 and later, pkg will install all packages
within the specified root directory
required: false
author: "bleader (@bleader)"
notes:
- When using pkgsite, be careful that already in cache packages won't be downloaded again.
'''
EXAMPLES = '''
# Install package foo
- pkgng: name=foo state=present
# Annotate package foo and bar
- pkgng: name=foo,bar annotation=+test1=baz,-test2,:test3=foobar
# Remove packages foo and bar
- pkgng: name=foo,bar state=absent
'''
import json
import shlex
import os
import re
import sys
def query_package(module, pkgng_path, name, rootdir_arg):
rc, out, err = module.run_command("%s %s info -g -e %s" % (pkgng_path, rootdir_arg, name))
if rc == 0:
return True
return False
def pkgng_older_than(module, pkgng_path, compare_version):
rc, out, err = module.run_command("%s -v" % pkgng_path)
version = map(lambda x: int(x), re.split(r'[\._]', out))
i = 0
new_pkgng = True
while compare_version[i] == version[i]:
i += 1
if i == min(len(compare_version), len(version)):
break
else:
if compare_version[i] > version[i]:
new_pkgng = False
return not new_pkgng
def remove_packages(module, pkgng_path, packages, rootdir_arg):
remove_c = 0
    # Use a for loop so that, in case of error, we can report the package that failed
for package in packages:
# Query the package first, to see if we even need to remove
if not query_package(module, pkgng_path, package, rootdir_arg):
continue
if not module.check_mode:
rc, out, err = module.run_command("%s %s delete -y %s" % (pkgng_path, rootdir_arg, package))
if not module.check_mode and query_package(module, pkgng_path, package, rootdir_arg):
module.fail_json(msg="failed to remove %s: %s" % (package, out))
remove_c += 1
if remove_c > 0:
return (True, "removed %s package(s)" % remove_c)
return (False, "package(s) already absent")
def install_packages(module, pkgng_path, packages, cached, pkgsite, rootdir_arg):
install_c = 0
# as of pkg-1.1.4, PACKAGESITE is deprecated in favor of repository definitions
# in /usr/local/etc/pkg/repos
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 1, 4])
if pkgsite != "":
if old_pkgng:
pkgsite = "PACKAGESITE=%s" % (pkgsite)
else:
pkgsite = "-r %s" % (pkgsite)
batch_var = 'env BATCH=yes' # This environment variable skips mid-install prompts,
# setting them to their default values.
if not module.check_mode and not cached:
if old_pkgng:
rc, out, err = module.run_command("%s %s update" % (pkgsite, pkgng_path))
else:
rc, out, err = module.run_command("%s update" % (pkgng_path))
if rc != 0:
module.fail_json(msg="Could not update catalogue")
for package in packages:
if query_package(module, pkgng_path, package, rootdir_arg):
continue
if not module.check_mode:
if old_pkgng:
rc, out, err = module.run_command("%s %s %s install -g -U -y %s" % (batch_var, pkgsite, pkgng_path, package))
else:
rc, out, err = module.run_command("%s %s %s install %s -g -U -y %s" % (batch_var, pkgng_path, rootdir_arg, pkgsite, package))
if not module.check_mode and not query_package(module, pkgng_path, package, rootdir_arg):
module.fail_json(msg="failed to install %s: %s" % (package, out), stderr=err)
install_c += 1
if install_c > 0:
return (True, "added %s package(s)" % (install_c))
return (False, "package(s) already present")
def annotation_query(module, pkgng_path, package, tag, rootdir_arg):
rc, out, err = module.run_command("%s %s info -g -A %s" % (pkgng_path, rootdir_arg, package))
match = re.search(r'^\s*(?P<tag>%s)\s*:\s*(?P<value>\w+)' % tag, out, flags=re.MULTILINE)
if match:
return match.group('value')
return False
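# Note: annotation_query() above matches ``pkg info -A`` output lines of the
# form ``<tag> : <value>``; the exact formatting may vary between pkg versions.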
def annotation_add(module, pkgng_path, package, tag, value, rootdir_arg):
_value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
if not _value:
# Annotation does not exist, add it.
rc, out, err = module.run_command('%s %s annotate -y -A %s %s "%s"'
% (pkgng_path, rootdir_arg, package, tag, value))
if rc != 0:
module.fail_json("could not annotate %s: %s"
% (package, out), stderr=err)
return True
elif _value != value:
# Annotation exists, but value differs
module.fail_json(
mgs="failed to annotate %s, because %s is already set to %s, but should be set to %s"
% (package, tag, _value, value))
return False
else:
# Annotation exists, nothing to do
return False
def annotation_delete(module, pkgng_path, package, tag, value, rootdir_arg):
_value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
if _value:
rc, out, err = module.run_command('%s %s annotate -y -D %s %s'
% (pkgng_path, rootdir_arg, package, tag))
if rc != 0:
module.fail_json("could not delete annotation to %s: %s"
% (package, out), stderr=err)
return True
return False
def annotation_modify(module, pkgng_path, package, tag, value, rootdir_arg):
_value = annotation_query(module, pkgng_path, package, tag, rootdir_arg)
    if not _value:
        # No such tag
        module.fail_json(msg="could not change annotation on %s: tag %s does not exist"
                             % (package, tag))
elif _value == value:
# No change in value
return False
else:
        rc, out, err = module.run_command('%s %s annotate -y -M %s %s "%s"'
                                          % (pkgng_path, rootdir_arg, package, tag, value))
        if rc != 0:
            module.fail_json(msg="could not change annotation on %s: %s"
                                 % (package, out), stderr=err)
return True
def annotate_packages(module, pkgng_path, packages, annotation, rootdir_arg):
annotate_c = 0
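    # Parse the comma-separated annotation spec into operation/tag/value
    # triples, e.g. "+test1=baz,-test2,:test3=foobar" yields an add of
    # test1=baz, a delete of test2 and a modification of test3 to foobar.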
annotations = map(lambda _annotation:
re.match(r'(?P<operation>[\+-:])(?P<tag>\w+)(=(?P<value>\w+))?',
_annotation).groupdict(),
re.split(r',', annotation))
operation = {
'+': annotation_add,
'-': annotation_delete,
':': annotation_modify
}
for package in packages:
for _annotation in annotations:
if operation[_annotation['operation']](module, pkgng_path, package, _annotation['tag'], _annotation['value']):
annotate_c += 1
if annotate_c > 0:
return (True, "added %s annotations." % annotate_c)
return (False, "changed no annotations")
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default="present", choices=["present","absent"], required=False),
name = dict(aliases=["pkg"], required=True),
cached = dict(default=False, type='bool'),
annotation = dict(default="", required=False),
pkgsite = dict(default="", required=False),
rootdir = dict(default="", required=False)),
supports_check_mode = True)
pkgng_path = module.get_bin_path('pkg', True)
p = module.params
pkgs = p["name"].split(",")
changed = False
msgs = []
rootdir_arg = ""
if p["rootdir"] != "":
old_pkgng = pkgng_older_than(module, pkgng_path, [1, 5, 0])
if old_pkgng:
module.fail_json(msg="To use option 'rootdir' pkg version must be 1.5 or greater")
else:
rootdir_arg = "--rootdir %s" % (p["rootdir"])
if p["state"] == "present":
_changed, _msg = install_packages(module, pkgng_path, pkgs, p["cached"], p["pkgsite"], rootdir_arg)
changed = changed or _changed
msgs.append(_msg)
elif p["state"] == "absent":
_changed, _msg = remove_packages(module, pkgng_path, pkgs, rootdir_arg)
changed = changed or _changed
msgs.append(_msg)
if p["annotation"]:
_changed, _msg = annotate_packages(module, pkgng_path, pkgs, p["annotation"], rootdir_arg)
changed = changed or _changed
msgs.append(_msg)
module.exit_json(changed=changed, msg=", ".join(msgs))
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
xianjunzhengbackup/Cloud-Native-Python | env/lib/python3.6/site-packages/flask/testsuite/basic.py | 406 | 43777 | # -*- coding: utf-8 -*-
"""
flask.testsuite.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import re
import uuid
import flask
import pickle
import unittest
from datetime import datetime
from threading import Thread
from flask.testsuite import FlaskTestCase, emits_module_deprecation_warning
from flask._compat import text_type
from werkzeug.exceptions import BadRequest, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
class BasicFunctionalityTestCase(FlaskTestCase):
def test_options_work(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
self.assert_equal(rv.data, b'')
def test_options_on_multiple_rules(self):
app = flask.Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
return 'Hello World'
@app.route('/', methods=['PUT'])
def index_put():
return 'Aha!'
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST', 'PUT'])
def test_options_handling_disabled(self):
app = flask.Flask(__name__)
def index():
return 'Hello World!'
index.provide_automatic_options = False
app.route('/')(index)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(rv.status_code, 405)
app = flask.Flask(__name__)
def index2():
return 'Hello World!'
index2.provide_automatic_options = True
app.route('/', methods=['OPTIONS'])(index2)
rv = app.test_client().open('/', method='OPTIONS')
self.assert_equal(sorted(rv.allow), ['OPTIONS'])
def test_request_dispatching(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return flask.request.method
@app.route('/more', methods=['GET', 'POST'])
def more():
return flask.request.method
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_url_mapping(self):
app = flask.Flask(__name__)
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule('/', 'index', index)
app.add_url_rule('/more', 'more', more, methods=['GET', 'POST'])
c = app.test_client()
self.assert_equal(c.get('/').data, b'GET')
rv = c.post('/')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS'])
rv = c.head('/')
self.assert_equal(rv.status_code, 200)
self.assert_false(rv.data) # head truncates
self.assert_equal(c.post('/more').data, b'POST')
self.assert_equal(c.get('/more').data, b'GET')
rv = c.delete('/more')
self.assert_equal(rv.status_code, 405)
self.assert_equal(sorted(rv.allow), ['GET', 'HEAD', 'OPTIONS', 'POST'])
def test_werkzeug_routing(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
def bar():
return 'bar'
def index():
return 'index'
app.view_functions['bar'] = bar
app.view_functions['index'] = index
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_endpoint_decorator(self):
from werkzeug.routing import Submount, Rule
app = flask.Flask(__name__)
app.url_map.add(Submount('/foo', [
Rule('/bar', endpoint='bar'),
Rule('/', endpoint='index')
]))
@app.endpoint('bar')
def bar():
return 'bar'
@app.endpoint('index')
def index():
return 'index'
c = app.test_client()
self.assert_equal(c.get('/foo/').data, b'index')
self.assert_equal(c.get('/foo/bar').data, b'bar')
def test_session(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/set', methods=['POST'])
def set():
flask.session['value'] = flask.request.form['value']
return 'value set'
@app.route('/get')
def get():
return flask.session['value']
c = app.test_client()
self.assert_equal(c.post('/set', data={'value': '42'}).data, b'value set')
self.assert_equal(c.get('/get').data, b'42')
def test_session_using_server_name(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_and_port(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('domain=.example.com', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_server_name_port_and_path(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='example.com:8080',
APPLICATION_ROOT='/foo'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/foo')
self.assert_in('domain=example.com', rv.headers['set-cookie'].lower())
self.assert_in('path=/foo', rv.headers['set-cookie'].lower())
self.assert_in('httponly', rv.headers['set-cookie'].lower())
def test_session_using_application_root(self):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
app = flask.Flask(__name__)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, '/bar')
app.config.update(
SECRET_KEY='foo',
APPLICATION_ROOT='/bar'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://example.com:8080/')
self.assert_in('path=/bar', rv.headers['set-cookie'].lower())
def test_session_using_session_settings(self):
app = flask.Flask(__name__)
app.config.update(
SECRET_KEY='foo',
SERVER_NAME='www.example.com:8080',
APPLICATION_ROOT='/test',
SESSION_COOKIE_DOMAIN='.example.com',
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_PATH='/'
)
@app.route('/')
def index():
flask.session['testing'] = 42
return 'Hello World'
rv = app.test_client().get('/', 'http://www.example.com:8080/test/')
cookie = rv.headers['set-cookie'].lower()
self.assert_in('domain=.example.com', cookie)
self.assert_in('path=/', cookie)
self.assert_in('secure', cookie)
self.assert_not_in('httponly', cookie)
def test_missing_session(self):
app = flask.Flask(__name__)
def expect_exception(f, *args, **kwargs):
try:
f(*args, **kwargs)
except RuntimeError as e:
self.assert_true(e.args and 'session is unavailable' in e.args[0])
else:
self.assert_true(False, 'expected exception')
with app.test_request_context():
self.assert_true(flask.session.get('missing_key') is None)
expect_exception(flask.session.__setitem__, 'foo', 42)
expect_exception(flask.session.pop, 'foo')
def test_session_expiration(self):
permanent = True
app = flask.Flask(__name__)
app.secret_key = 'testkey'
@app.route('/')
def index():
flask.session['test'] = 42
flask.session.permanent = permanent
return ''
@app.route('/test')
def test():
return text_type(flask.session.permanent)
client = app.test_client()
rv = client.get('/')
self.assert_in('set-cookie', rv.headers)
match = re.search(r'\bexpires=([^;]+)(?i)', rv.headers['set-cookie'])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
self.assert_equal(expires.year, expected.year)
self.assert_equal(expires.month, expected.month)
self.assert_equal(expires.day, expected.day)
rv = client.get('/test')
self.assert_equal(rv.data, b'True')
permanent = False
rv = app.test_client().get('/')
self.assert_in('set-cookie', rv.headers)
match = re.search(r'\bexpires=([^;]+)', rv.headers['set-cookie'])
self.assert_true(match is None)
def test_session_stored_last(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
@app.after_request
def modify_session(response):
flask.session['foo'] = 42
return response
@app.route('/')
def dump_session_contents():
return repr(flask.session.get('foo'))
c = app.test_client()
self.assert_equal(c.get('/').data, b'None')
self.assert_equal(c.get('/').data, b'42')
def test_session_special_types(self):
app = flask.Flask(__name__)
app.secret_key = 'development-key'
app.testing = True
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.after_request
def modify_session(response):
flask.session['m'] = flask.Markup('Hello!')
flask.session['u'] = the_uuid
flask.session['dt'] = now
flask.session['b'] = b'\xff'
flask.session['t'] = (1, 2, 3)
return response
@app.route('/')
def dump_session_contents():
return pickle.dumps(dict(flask.session))
c = app.test_client()
c.get('/')
rv = pickle.loads(c.get('/').data)
self.assert_equal(rv['m'], flask.Markup('Hello!'))
self.assert_equal(type(rv['m']), flask.Markup)
self.assert_equal(rv['dt'], now)
self.assert_equal(rv['u'], the_uuid)
self.assert_equal(rv['b'], b'\xff')
self.assert_equal(type(rv['b']), bytes)
self.assert_equal(rv['t'], (1, 2, 3))
def test_flashes(self):
app = flask.Flask(__name__)
app.secret_key = 'testkey'
with app.test_request_context():
self.assert_false(flask.session.modified)
flask.flash('Zap')
flask.session.modified = False
flask.flash('Zip')
self.assert_true(flask.session.modified)
self.assert_equal(list(flask.get_flashed_messages()), ['Zap', 'Zip'])
def test_extended_flashing(self):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
app = flask.Flask(__name__)
app.secret_key = 'testkey'
app.testing = True
@app.route('/')
def index():
flask.flash(u'Hello World')
flask.flash(u'Hello World', 'error')
flask.flash(flask.Markup(u'<em>Testing</em>'), 'warning')
return ''
@app.route('/test/')
def test():
messages = flask.get_flashed_messages()
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], u'Hello World')
self.assert_equal(messages[2], flask.Markup(u'<em>Testing</em>'))
return ''
@app.route('/test_with_categories/')
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
self.assert_equal(len(messages), 3)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('error', u'Hello World'))
self.assert_equal(messages[2], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filter/')
def test_filter():
messages = flask.get_flashed_messages(category_filter=['message'], with_categories=True)
self.assert_equal(len(messages), 1)
self.assert_equal(messages[0], ('message', u'Hello World'))
return ''
@app.route('/test_filters/')
def test_filters():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'], with_categories=True)
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], ('message', u'Hello World'))
self.assert_equal(messages[1], ('warning', flask.Markup(u'<em>Testing</em>')))
return ''
@app.route('/test_filters_without_returning_categories/')
def test_filters2():
messages = flask.get_flashed_messages(category_filter=['message', 'warning'])
self.assert_equal(len(messages), 2)
self.assert_equal(messages[0], u'Hello World')
self.assert_equal(messages[1], flask.Markup(u'<em>Testing</em>'))
return ''
# Create new test client on each test to clean flashed messages.
c = app.test_client()
c.get('/')
c.get('/test/')
c = app.test_client()
c.get('/')
c.get('/test_with_categories/')
c = app.test_client()
c.get('/')
c.get('/test_filter/')
c = app.test_client()
c.get('/')
c.get('/test_filters/')
c = app.test_client()
c.get('/')
c.get('/test_filters_without_returning_categories/')
def test_request_processing(self):
app = flask.Flask(__name__)
evts = []
@app.before_request
def before_request():
evts.append('before')
@app.after_request
def after_request(response):
response.data += b'|after'
evts.append('after')
return response
@app.route('/')
def index():
self.assert_in('before', evts)
self.assert_not_in('after', evts)
return 'request'
self.assert_not_in('after', evts)
rv = app.test_client().get('/').data
self.assert_in('after', evts)
self.assert_equal(rv, b'request|after')
def test_after_request_processing(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.after_this_request
def foo(response):
response.headers['X-Foo'] = 'a header'
return response
return 'Test'
c = app.test_client()
resp = c.get('/')
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.headers['X-Foo'], 'a header')
def test_teardown_request_handler(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_debug_mode(self):
called = []
app = flask.Flask(__name__)
app.testing = True
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route('/')
def root():
return "Response"
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 200)
self.assert_in(b'Response', rv.data)
self.assert_equal(len(called), 1)
def test_teardown_request_handler_error(self):
called = []
app = flask.Flask(__name__)
@app.teardown_request
def teardown_request1(exc):
self.assert_equal(type(exc), ZeroDivisionError)
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.teardown_request
def teardown_request2(exc):
self.assert_equal(type(exc), ZeroDivisionError)
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.route('/')
def fails():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
self.assert_equal(len(called), 2)
def test_before_after_request_order(self):
called = []
app = flask.Flask(__name__)
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route('/')
def index():
return '42'
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'42')
self.assert_equal(called, [1, 2, 3, 4, 5, 6])
def test_error_handling(self):
app = flask.Flask(__name__)
@app.errorhandler(404)
def not_found(e):
return 'not found', 404
@app.errorhandler(500)
def internal_server_error(e):
return 'internal server error', 500
@app.route('/')
def index():
flask.abort(404)
@app.route('/error')
def error():
1 // 0
c = app.test_client()
rv = c.get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'not found')
rv = c.get('/error')
self.assert_equal(rv.status_code, 500)
self.assert_equal(b'internal server error', rv.data)
def test_before_request_and_routing_errors(self):
app = flask.Flask(__name__)
@app.before_request
def attach_something():
flask.g.something = 'value'
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'value')
def test_user_error_handling(self):
class MyException(Exception):
pass
app = flask.Flask(__name__)
@app.errorhandler(MyException)
def handle_my_exception(e):
self.assert_true(isinstance(e, MyException))
return '42'
@app.route('/')
def index():
raise MyException()
c = app.test_client()
self.assert_equal(c.get('/').data, b'42')
def test_trapping_of_bad_request_key_errors(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/fail')
def fail():
flask.request.form['missing_key']
c = app.test_client()
self.assert_equal(c.get('/fail').status_code, 400)
app.config['TRAP_BAD_REQUEST_ERRORS'] = True
c = app.test_client()
try:
c.get('/fail')
except KeyError as e:
self.assert_true(isinstance(e, BadRequest))
else:
self.fail('Expected exception')
def test_trapping_of_all_http_exceptions(self):
app = flask.Flask(__name__)
app.testing = True
app.config['TRAP_HTTP_EXCEPTIONS'] = True
@app.route('/fail')
def fail():
flask.abort(404)
c = app.test_client()
try:
c.get('/fail')
except NotFound as e:
pass
else:
self.fail('Expected exception')
def test_enctype_debug_helper(self):
from flask.debughelpers import DebugFilesKeyError
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail', methods=['POST'])
def index():
return flask.request.files['foo'].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with app.test_client() as c:
try:
c.post('/fail', data={'foo': 'index.txt'})
except DebugFilesKeyError as e:
self.assert_in('no file contents were transmitted', str(e))
self.assert_in('This was submitted: "index.txt"', str(e))
else:
self.fail('Expected exception')
def test_response_creation(self):
app = flask.Flask(__name__)
@app.route('/unicode')
def from_unicode():
return u'Hällo Wörld'
@app.route('/string')
def from_string():
return u'Hällo Wörld'.encode('utf-8')
@app.route('/args')
def from_tuple():
return 'Meh', 400, {
'X-Foo': 'Testing',
'Content-Type': 'text/plain; charset=utf-8'
}
c = app.test_client()
self.assert_equal(c.get('/unicode').data, u'Hällo Wörld'.encode('utf-8'))
self.assert_equal(c.get('/string').data, u'Hällo Wörld'.encode('utf-8'))
rv = c.get('/args')
self.assert_equal(rv.data, b'Meh')
self.assert_equal(rv.headers['X-Foo'], 'Testing')
self.assert_equal(rv.status_code, 400)
self.assert_equal(rv.mimetype, 'text/plain')
def test_make_response(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response()
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('Awesome')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, b'Awesome')
self.assert_equal(rv.mimetype, 'text/html')
rv = flask.make_response('W00t', 404)
self.assert_equal(rv.status_code, 404)
self.assert_equal(rv.data, b'W00t')
self.assert_equal(rv.mimetype, 'text/html')
def test_make_response_with_response_instance(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.make_response(
flask.jsonify({'msg': 'W00t'}), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, b'{\n "msg": "W00t"\n}')
self.assertEqual(rv.mimetype, 'application/json')
rv = flask.make_response(
flask.Response(''), 400)
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.data, b'')
self.assertEqual(rv.mimetype, 'text/html')
rv = flask.make_response(
flask.Response('', headers={'Content-Type': 'text/html'}),
400, [('X-Foo', 'bar')])
self.assertEqual(rv.status_code, 400)
self.assertEqual(rv.headers['Content-Type'], 'text/html')
self.assertEqual(rv.headers['X-Foo'], 'bar')
def test_url_generation(self):
app = flask.Flask(__name__)
@app.route('/hello/<name>', methods=['POST'])
def hello():
pass
with app.test_request_context():
self.assert_equal(flask.url_for('hello', name='test x'), '/hello/test%20x')
self.assert_equal(flask.url_for('hello', name='test x', _external=True),
'http://localhost/hello/test%20x')
def test_build_error_handler(self):
app = flask.Flask(__name__)
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
self.assertRaises(BuildError, flask.url_for, 'spam')
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for('spam')
except BuildError as err:
error = err
try:
raise RuntimeError('Test case where BuildError is not current.')
except RuntimeError:
self.assertRaises(BuildError, app.handle_url_build_error, error, 'spam', {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return '/test_handler/'
app.url_build_error_handlers.append(handler)
with app.test_request_context():
self.assert_equal(flask.url_for('spam'), '/test_handler/')
def test_custom_converters(self):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(',')
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ','.join(base_to_url(x) for x in value)
app = flask.Flask(__name__)
app.url_map.converters['list'] = ListConverter
@app.route('/<list:args>')
def index(args):
return '|'.join(args)
c = app.test_client()
self.assert_equal(c.get('/1,2,3').data, b'1|2|3')
def test_static_files(self):
app = flask.Flask(__name__)
app.testing = True
rv = app.test_client().get('/static/index.html')
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data.strip(), b'<h1>Hello World!</h1>')
with app.test_request_context():
self.assert_equal(flask.url_for('static', filename='index.html'),
'/static/index.html')
rv.close()
def test_none_response(self):
app = flask.Flask(__name__)
@app.route('/')
def test():
return None
try:
app.test_client().get('/')
except ValueError as e:
self.assert_equal(str(e), 'View function did not return a response')
        else:
            self.fail('Expected ValueError')
def test_request_locals(self):
self.assert_equal(repr(flask.g), '<LocalProxy unbound>')
self.assertFalse(flask.g)
def test_test_app_proper_environ(self):
app = flask.Flask(__name__)
app.config.update(
SERVER_NAME='localhost.localdomain:5000'
)
@app.route('/')
def index():
return 'Foo'
@app.route('/', subdomain='foo')
def subdomain():
return 'Foo SubDomain'
rv = app.test_client().get('/')
self.assert_equal(rv.data, b'Foo')
rv = app.test_client().get('/', 'http://localhost.localdomain:5000')
self.assert_equal(rv.data, b'Foo')
rv = app.test_client().get('/', 'https://localhost.localdomain:5000')
self.assert_equal(rv.data, b'Foo')
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'https://localhost.localdomain')
self.assert_equal(rv.data, b'Foo')
try:
app.config.update(SERVER_NAME='localhost.localdomain:443')
rv = app.test_client().get('/', 'https://localhost.localdomain')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError as e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " +
"('localhost.localdomain:443') does not match the " + \
"server name from the WSGI environment ('localhost.localdomain')")
try:
app.config.update(SERVER_NAME='localhost.localdomain')
rv = app.test_client().get('/', 'http://foo.localhost')
# Werkzeug 0.8
self.assert_equal(rv.status_code, 404)
except ValueError as e:
# Werkzeug 0.7
self.assert_equal(str(e), "the server name provided " + \
"('localhost.localdomain') does not match the " + \
"server name from the WSGI environment ('foo.localhost')")
rv = app.test_client().get('/', 'http://foo.localhost.localdomain')
self.assert_equal(rv.data, b'Foo SubDomain')
def test_exception_propagation(self):
        def apprunner(config_key):
app = flask.Flask(__name__)
@app.route('/')
def index():
1 // 0
c = app.test_client()
if config_key is not None:
app.config[config_key] = True
try:
resp = c.get('/')
except Exception:
pass
else:
self.fail('expected exception')
else:
self.assert_equal(c.get('/').status_code, 500)
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens the context is
# not torn down. This causes other tests that run after this fail
# when they expect no exception on the stack.
for config_key in 'TESTING', 'PROPAGATE_EXCEPTIONS', 'DEBUG', None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
def test_max_content_length(self):
app = flask.Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 64
@app.before_request
def always_first():
flask.request.form['myfile']
self.assert_true(False)
@app.route('/accept', methods=['POST'])
def accept_file():
flask.request.form['myfile']
self.assert_true(False)
@app.errorhandler(413)
def catcher(error):
return '42'
c = app.test_client()
rv = c.post('/accept', data={'myfile': 'foo' * 100})
self.assert_equal(rv.data, b'42')
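    # The next test exercises the url_defaults/url_value_preprocessor pair:
    # the preprocessor pulls lang_code out of the matched view arguments into
    # flask.g, and the defaults hook injects it back whenever url_for() builds
    # a URL for an endpoint that expects a lang_code.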
def test_url_processors(self):
app = flask.Flask(__name__)
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and \
app.url_map.is_endpoint_expecting(endpoint, 'lang_code'):
values.setdefault('lang_code', flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop('lang_code', None)
@app.route('/<lang_code>/')
def index():
return flask.url_for('about')
@app.route('/<lang_code>/about')
def about():
return flask.url_for('something_else')
@app.route('/foo')
def something_else():
return flask.url_for('about', lang_code='en')
c = app.test_client()
self.assert_equal(c.get('/de/').data, b'/de/about')
self.assert_equal(c.get('/de/about').data, b'/foo')
self.assert_equal(c.get('/foo').data, b'/en/about')
def test_inject_blueprint_url_defaults(self):
app = flask.Flask(__name__)
bp = flask.Blueprint('foo.bar.baz', __name__,
template_folder='template')
@bp.url_defaults
def bp_defaults(endpoint, values):
values['page'] = 'login'
@bp.route('/<page>')
def view(page): pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults('foo.bar.baz.view', values)
expected = dict(page='login')
self.assert_equal(values, expected)
with app.test_request_context('/somepage'):
url = flask.url_for('foo.bar.baz.view')
expected = '/login'
self.assert_equal(url, expected)
def test_nonascii_pathinfo(self):
app = flask.Flask(__name__)
app.testing = True
@app.route(u'/киртест')
def index():
return 'Hello World!'
c = app.test_client()
rv = c.get(u'/киртест')
self.assert_equal(rv.data, b'Hello World!')
def test_debug_mode_complains_after_first_request(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
return 'Awesome'
self.assert_false(app.got_first_request)
self.assert_equal(app.test_client().get('/').data, b'Awesome')
try:
@app.route('/foo')
def broken():
return 'Meh'
except AssertionError as e:
self.assert_in('A setup function was called', str(e))
else:
self.fail('Expected exception')
app.debug = False
@app.route('/foo')
def working():
return 'Meh'
self.assert_equal(app.test_client().get('/foo').data, b'Meh')
self.assert_true(app.got_first_request)
def test_before_first_request_functions(self):
got = []
app = flask.Flask(__name__)
@app.before_first_request
def foo():
got.append(42)
c = app.test_client()
c.get('/')
self.assert_equal(got, [42])
c.get('/')
self.assert_equal(got, [42])
self.assert_true(app.got_first_request)
def test_routing_redirect_debugging(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/', methods=['GET', 'POST'])
def foo():
return 'success'
with app.test_client() as c:
try:
c.post('/foo', data={})
except AssertionError as e:
self.assert_in('http://localhost/foo/', str(e))
self.assert_in('Make sure to directly send your POST-request '
'to this URL', str(e))
else:
self.fail('Expected exception')
rv = c.get('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, b'success')
app.debug = False
with app.test_client() as c:
rv = c.post('/foo', data={}, follow_redirects=True)
self.assert_equal(rv.data, b'success')
def test_route_decorator_custom_endpoint(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/foo/')
def foo():
return flask.request.endpoint
@app.route('/bar/', endpoint='bar')
def for_bar():
return flask.request.endpoint
@app.route('/bar/123', endpoint='123')
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for('foo') == '/foo/'
assert flask.url_for('bar') == '/bar/'
assert flask.url_for('123') == '/bar/123'
c = app.test_client()
self.assertEqual(c.get('/foo/').data, b'foo')
self.assertEqual(c.get('/bar/').data, b'bar')
self.assertEqual(c.get('/bar/123').data, b'123')
def test_preserve_only_once(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/fail')
def fail_func():
1 // 0
c = app.test_client()
for x in range(3):
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_true(flask._request_ctx_stack.top is not None)
self.assert_true(flask._app_ctx_stack.top is not None)
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
self.assert_true(flask._request_ctx_stack.top is None)
self.assert_true(flask._app_ctx_stack.top is None)
def test_preserve_remembers_exception(self):
app = flask.Flask(__name__)
app.debug = True
errors = []
@app.route('/fail')
def fail_func():
1 // 0
@app.route('/success')
def success_func():
return 'Okay'
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
c = app.test_client()
# After this failure we did not yet call the teardown handler
with self.assert_raises(ZeroDivisionError):
c.get('/fail')
self.assert_equal(errors, [])
# But this request triggers it, and it's an error
c.get('/success')
self.assert_equal(len(errors), 2)
self.assert_true(isinstance(errors[0], ZeroDivisionError))
# At this point another request does nothing.
c.get('/success')
self.assert_equal(len(errors), 3)
self.assert_equal(errors[1], None)
def test_get_method_on_g(self):
app = flask.Flask(__name__)
app.testing = True
with app.app_context():
self.assert_equal(flask.g.get('x'), None)
self.assert_equal(flask.g.get('x', 11), 11)
flask.g.x = 42
self.assert_equal(flask.g.get('x'), 42)
self.assert_equal(flask.g.x, 42)
def test_g_iteration_protocol(self):
app = flask.Flask(__name__)
app.testing = True
with app.app_context():
flask.g.foo = 23
flask.g.bar = 42
self.assert_equal('foo' in flask.g, True)
self.assert_equal('foos' in flask.g, False)
self.assert_equal(sorted(flask.g), ['bar', 'foo'])
class SubdomainTestCase(FlaskTestCase):
def test_basic_support(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/')
def normal_index():
return 'normal index'
@app.route('/', subdomain='test')
def test_index():
return 'test index'
c = app.test_client()
rv = c.get('/', 'http://localhost/')
self.assert_equal(rv.data, b'normal index')
rv = c.get('/', 'http://test.localhost/')
self.assert_equal(rv.data, b'test index')
@emits_module_deprecation_warning
def test_module_static_path_subdomain(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'example.com'
from subdomaintestmodule import mod
app.register_module(mod)
c = app.test_client()
rv = c.get('/static/hello.txt', 'http://foo.example.com/')
rv.direct_passthrough = False
self.assert_equal(rv.data.strip(), b'Hello Subdomain')
rv.close()
def test_subdomain_matching(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost/')
self.assert_equal(rv.data, b'index for mitsuhiko')
def test_subdomain_matching_with_ports(self):
app = flask.Flask(__name__)
app.config['SERVER_NAME'] = 'localhost:3000'
@app.route('/', subdomain='<user>')
def index(user):
return 'index for %s' % user
c = app.test_client()
rv = c.get('/', 'http://mitsuhiko.localhost:3000/')
self.assert_equal(rv.data, b'index for mitsuhiko')
@emits_module_deprecation_warning
def test_module_subdomain_support(self):
app = flask.Flask(__name__)
mod = flask.Module(__name__, 'test', subdomain='testing')
app.config['SERVER_NAME'] = 'localhost'
@mod.route('/test')
def test():
return 'Test'
@mod.route('/outside', subdomain='xtesting')
def bar():
return 'Outside'
app.register_module(mod)
c = app.test_client()
rv = c.get('/test', 'http://testing.localhost/')
self.assert_equal(rv.data, b'Test')
rv = c.get('/outside', 'http://xtesting.localhost/')
self.assert_equal(rv.data, b'Outside')
def test_multi_route_rules(self):
app = flask.Flask(__name__)
@app.route('/')
@app.route('/<test>/')
def index(test='a'):
return test
rv = app.test_client().open('/')
self.assert_equal(rv.data, b'a')
rv = app.test_client().open('/b/')
self.assert_equal(rv.data, b'b')
def test_multi_route_class_views(self):
class View(object):
def __init__(self, app):
app.add_url_rule('/', 'index', self.index)
app.add_url_rule('/<test>/', 'index', self.index)
def index(self, test='a'):
return test
app = flask.Flask(__name__)
_ = View(app)
rv = app.test_client().open('/')
self.assert_equal(rv.data, b'a')
rv = app.test_client().open('/b/')
self.assert_equal(rv.data, b'b')
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(BasicFunctionalityTestCase))
suite.addTest(unittest.makeSuite(SubdomainTestCase))
return suite
| mit |
mortcanty/earthengine | src/Crypto/Signature/PKCS1_v1_5.py | 126 | 8637 | # -*- coding: utf-8 -*-
#
# Signature/PKCS1-v1_5.py : PKCS#1 v1.5
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""
RSA digital signature protocol according to PKCS#1 v1.5
See RFC3447__ or the `original RSA Labs specification`__.
This scheme is more properly called ``RSASSA-PKCS1-v1_5``.
For example, a sender may authenticate a message using SHA-1 like
this:
>>> from Crypto.Signature import PKCS1_v1_5
>>> from Crypto.Hash import SHA
>>> from Crypto.PublicKey import RSA
>>>
>>> message = 'To be signed'
>>> key = RSA.importKey(open('privkey.der').read())
>>> h = SHA.new(message)
>>> signer = PKCS1_v1_5.new(key)
>>> signature = signer.sign(h)
At the receiver side, verification can be done using the public part of
the RSA key:
>>> key = RSA.importKey(open('pubkey.der').read())
>>> h = SHA.new(message)
>>> verifier = PKCS1_v1_5.new(key)
>>> if verifier.verify(h, signature):
>>> print "The signature is authentic."
>>> else:
>>> print "The signature is not authentic."
:undocumented: __revision__, __package__
.. __: http://www.ietf.org/rfc/rfc3447.txt
.. __: http://www.rsa.com/rsalabs/node.asp?id=2125
"""
__revision__ = "$Id$"
__all__ = [ 'new', 'PKCS115_SigScheme' ]
import Crypto.Util.number
from Crypto.Util.number import ceil_div
from Crypto.Util.asn1 import DerSequence, DerNull, DerOctetString
from Crypto.Util.py3compat import *
class PKCS115_SigScheme:
"""This signature scheme can perform PKCS#1 v1.5 RSA signature or verification."""
def __init__(self, key):
"""Initialize this PKCS#1 v1.5 signature scheme object.
:Parameters:
key : an RSA key object
If a private half is given, both signature and verification are possible.
If a public half is given, only verification is possible.
"""
self._key = key
def can_sign(self):
"""Return True if this cipher object can be used for signing messages."""
return self._key.has_private()
def sign(self, mhash):
"""Produce the PKCS#1 v1.5 signature of a message.
This function is named ``RSASSA-PKCS1-V1_5-SIGN``, and is specified in
section 8.2.1 of RFC3447.
:Parameters:
mhash : hash object
The hash that was carried out over the message. This is an object
belonging to the `Crypto.Hash` module.
:Return: The signature encoded as a string.
:Raise ValueError:
If the RSA key is not long enough to deal with the given
hash algorithm.
:Raise TypeError:
If the RSA key has no private half.
"""
# TODO: Verify the key is RSA
# See 8.2.1 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
# Step 1
em = EMSA_PKCS1_V1_5_ENCODE(mhash, k)
# Step 2a (OS2IP) and 2b (RSASP1)
m = self._key.decrypt(em)
# Step 2c (I2OSP)
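# (left-pad with zero bytes so the signature is exactly k bytes long)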
S = bchr(0x00)*(k-len(m)) + m
return S
def verify(self, mhash, S):
"""Verify that a certain PKCS#1 v1.5 signature is authentic.
This function checks if the party holding the private half of the key
really signed the message.
This function is named ``RSASSA-PKCS1-V1_5-VERIFY``, and is specified in
section 8.2.2 of RFC3447.
:Parameters:
mhash : hash object
The hash that was carried out over the message. This is an object
belonging to the `Crypto.Hash` module.
S : string
The signature that needs to be validated.
:Return: True if verification is correct. False otherwise.
"""
# TODO: Verify the key is RSA
# See 8.2.2 in RFC3447
modBits = Crypto.Util.number.size(self._key.n)
k = ceil_div(modBits,8) # Convert from bits to bytes
# Step 1
if len(S) != k:
return 0
# Step 2a (O2SIP) and 2b (RSAVP1)
# Note that the signature must be smaller than the modulus,
# but RSA.py won't complain about it.
# TODO: Fix RSA object; don't do it here.
m = self._key.encrypt(S, 0)[0]
# Step 2c (I2OSP)
em1 = bchr(0x00)*(k-len(m)) + m
# Step 3
try:
em2 = EMSA_PKCS1_V1_5_ENCODE(mhash, k)
except ValueError:
return 0
# Step 4
# By comparing the full encodings (as opposed to checking each
# of its components one at a time) we avoid attacks to the padding
# scheme like Bleichenbacher's (see http://www.mail-archive.com/cryptography@metzdowd.com/msg06537).
#
return em1==em2
def EMSA_PKCS1_V1_5_ENCODE(hash, emLen):
"""
Implement the ``EMSA-PKCS1-V1_5-ENCODE`` function, as defined
in PKCS#1 v2.1 (RFC3447, 9.2).
``EMSA-PKCS1-V1_5-ENCODE`` actually accepts the message ``M`` as input,
and hashes it internally. Here, we expect that the message has already
been hashed instead.
:Parameters:
hash : hash object
The hash object that holds the digest of the message being signed.
emLen : int
The length the final encoding must have, in bytes.
:attention: the early standard (RFC2313) stated that ``DigestInfo``
had to be BER-encoded. This means that old signatures
might have length tags in indefinite form, which
is not supported in DER. Such encoding cannot be
reproduced by this function.
:attention: the same standard defined ``DigestAlgorithm`` to be
of ``AlgorithmIdentifier`` type, where the PARAMETERS
item is optional. Encodings for ``MD2/4/5`` without
``PARAMETERS`` cannot be reproduced by this function.
:Return: An ``emLen`` byte long string that encodes the hash.
"""
# First, build the ASN.1 DER object DigestInfo:
#
# DigestInfo ::= SEQUENCE {
# digestAlgorithm AlgorithmIdentifier,
# digest OCTET STRING
# }
#
# where digestAlgorithm identifies the hash function and shall be an
# algorithm ID with an OID in the set PKCS1-v1-5DigestAlgorithms.
#
# PKCS1-v1-5DigestAlgorithms ALGORITHM-IDENTIFIER ::= {
# { OID id-md2 PARAMETERS NULL }|
# { OID id-md5 PARAMETERS NULL }|
# { OID id-sha1 PARAMETERS NULL }|
# { OID id-sha256 PARAMETERS NULL }|
# { OID id-sha384 PARAMETERS NULL }|
# { OID id-sha512 PARAMETERS NULL }
# }
#
digestAlgo = DerSequence([hash.oid, DerNull().encode()])
digest = DerOctetString(hash.digest())
digestInfo = DerSequence([
digestAlgo.encode(),
digest.encode()
]).encode()
# We need at least 11 bytes for the remaining data: 3 fixed bytes and
# at least 8 bytes of padding.
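# The final encoding is EM = 0x00 || 0x01 || PS || 0x00 || DigestInfo,
# where PS consists of at least eight 0xFF bytes (RFC3447, section 9.2).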
if emLen<len(digestInfo)+11:
raise ValueError("Selected hash algorith has a too long digest (%d bytes)." % len(digest))
PS = bchr(0xFF) * (emLen - len(digestInfo) - 3)
return b("\x00\x01") + PS + bchr(0x00) + digestInfo
def new(key):
"""Return a signature scheme object `PKCS115_SigScheme` that
can be used to perform PKCS#1 v1.5 signature or verification.
:Parameters:
key : RSA key object
The key to use to sign or verify the message. This is a `Crypto.PublicKey.RSA` object.
Signing is only possible if *key* is a private RSA key.
"""
return PKCS115_SigScheme(key)
| mit |
mrunge/horizon | openstack_dashboard/dashboards/project/containers/panel.py | 48 | 1079 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
from openstack_dashboard.dashboards.project import dashboard
class Containers(horizon.Panel):
name = _("Containers")
slug = 'containers'
permissions = ('openstack.services.object-store',)
dashboard.Project.register(Containers)
| apache-2.0 |
EntropyFactory/creativechain-core | qa/rpc-tests/p2p-fullblocktest.py | 11 | 52732 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.comptool import TestManager, TestInstance, RejectResult
from test_framework.blocktools import *
import time
from test_framework.key import CECKey
from test_framework.script import *
import struct
class PreviousSpendableOutput(object):
def __init__(self, tx = CTransaction(), n = -1):
self.tx = tx
self.n = n # the output we're spending
'''
This reimplements tests from the bitcoinj/FullBlockTestGenerator used
by the pull-tester.
We use the testing framework in which we expect a particular answer from
each test.
'''
# Use this class for tests that require behavior other than normal "mininode" behavior.
# For now, it is used to serialize a bloated varint (b64).
class CBrokenBlock(CBlock):
def __init__(self, header=None):
super(CBrokenBlock, self).__init__(header)
def initialize(self, base_block):
self.vtx = copy.deepcopy(base_block.vtx)
self.hashMerkleRoot = self.calc_merkle_root()
def serialize(self):
r = b""
r += super(CBlock, self).serialize()
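# A canonical CompactSize would use the minimal encoding for len(self.vtx);
# the 0xFF marker below forces the count to be read from the following
# 8 bytes, bloating the varint without changing its value.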
r += struct.pack("<BQ", 255, len(self.vtx))
for tx in self.vtx:
r += tx.serialize()
return r
def normal_serialize(self):
r = b""
r += super(CBrokenBlock, self).serialize()
return r
class FullBlockTest(ComparisonTestFramework):
# Can either run this test as 1 node with expected answers, or two and compare them.
# Change the "outcome" variable from each TestInstance object to only do the comparison.
def __init__(self):
super().__init__()
self.num_nodes = 1
self.block_heights = {}
self.coinbase_key = CECKey()
self.coinbase_key.set_secretbytes(b"horsebattery")
self.coinbase_pubkey = self.coinbase_key.get_pubkey()
self.tip = None
self.blocks = {}
def add_options(self, parser):
super().add_options(parser)
parser.add_option("--runbarelyexpensive", dest="runbarelyexpensive", default=True)
def run_test(self):
self.test = TestManager(self, self.options.tmpdir)
self.test.add_all_connections(self.nodes)
NetworkThread().start() # Start up network handling in another thread
self.test.run()
def add_transactions_to_block(self, block, tx_list):
[ tx.rehash() for tx in tx_list ]
block.vtx.extend(tx_list)
# this is a little handier to use than the version in blocktools.py
def create_tx(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = create_transaction(spend_tx, n, b"", value, script)
return tx
# sign a transaction, using the key we know about
# this signs input 0 in tx, which is assumed to be spending output n in spend_tx
def sign_tx(self, tx, spend_tx, n):
scriptPubKey = bytearray(spend_tx.vout[n].scriptPubKey)
if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend
tx.vin[0].scriptSig = CScript()
return
(sighash, err) = SignatureHash(spend_tx.vout[n].scriptPubKey, tx, 0, SIGHASH_ALL)
tx.vin[0].scriptSig = CScript([self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))])
def create_and_sign_transaction(self, spend_tx, n, value, script=CScript([OP_TRUE])):
tx = self.create_tx(spend_tx, n, value, script)
self.sign_tx(tx, spend_tx, n)
tx.rehash()
return tx
def next_block(self, number, spend=None, additional_coinbase_value=0, script=CScript([OP_TRUE]), solve=True):
if self.tip == None:
base_block_hash = self.genesis_hash
block_time = int(time.time())+1
else:
base_block_hash = self.tip.sha256
block_time = self.tip.nTime + 1
# First create the coinbase
height = self.block_heights[base_block_hash] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
coinbase.vout[0].nValue += additional_coinbase_value
coinbase.rehash()
if spend == None:
block = create_block(base_block_hash, coinbase, block_time)
else:
coinbase.vout[0].nValue += spend.tx.vout[spend.n].nValue - 1 # all but one satoshi to fees
coinbase.rehash()
block = create_block(base_block_hash, coinbase, block_time)
tx = create_transaction(spend.tx, spend.n, b"", 1, script) # spend 1 satoshi
self.sign_tx(tx, spend.tx, spend.n)
self.add_transactions_to_block(block, [tx])
block.hashMerkleRoot = block.calc_merkle_root()
if solve:
block.solve()
self.tip = block
self.block_heights[block.sha256] = height
assert number not in self.blocks
self.blocks[number] = block
return block
def get_tests(self):
self.genesis_hash = int(self.nodes[0].getbestblockhash(), 16)
self.block_heights[self.genesis_hash] = 0
spendable_outputs = []
# save the current tip so it can be spent by a later block
def save_spendable_output():
spendable_outputs.append(self.tip)
# get an output that we previously marked as spendable
def get_spendable_output():
return PreviousSpendableOutput(spendable_outputs.pop(0).vtx[0], 0)
# returns a test case that asserts that the current tip was accepted
def accepted():
return TestInstance([[self.tip, True]])
# returns a test case that asserts that the current tip was rejected
def rejected(reject = None):
if reject is None:
return TestInstance([[self.tip, False]])
else:
return TestInstance([[self.tip, reject]])
# move the tip back to a previous block
def tip(number):
self.tip = self.blocks[number]
# adds transactions to the block and updates state
def update_block(block_number, new_transactions):
block = self.blocks[block_number]
self.add_transactions_to_block(block, new_transactions)
old_sha256 = block.sha256
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
# Update the internal state just like in next_block
self.tip = block
if block.sha256 != old_sha256:
self.block_heights[block.sha256] = self.block_heights[old_sha256]
del self.block_heights[old_sha256]
self.blocks[block_number] = block
return block
# shorthand for functions
block = self.next_block
create_tx = self.create_tx
create_and_sign_tx = self.create_and_sign_transaction
# these must be updated if consensus changes
MAX_BLOCK_SIGOPS = 20000
# Create a new block
block(0)
save_spendable_output()
yield accepted()
# Now we need that block to mature so we can spend the coinbase.
test = TestInstance(sync_every_block=False)
for i in range(99):
block(5000 + i)
test.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
yield test
# collect spendable outputs now to avoid cluttering the code later on
out = []
for i in range(33):
out.append(get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
# in parentheses):
# genesis -> b1 (0) -> b2 (1)
block(1, spend=out[0])
save_spendable_output()
yield accepted()
block(2, spend=out[1])
yield accepted()
save_spendable_output()
# so fork like this:
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1)
#
# Nothing should happen at this point. We saw b2 first so it takes priority.
tip(1)
b3 = block(3, spend=out[1])
txout_b3 = PreviousSpendableOutput(b3.vtx[1], 0)
yield rejected()
# Now we add another block to make the alternative chain longer.
#
# genesis -> b1 (0) -> b2 (1)
# \-> b3 (1) -> b4 (2)
block(4, spend=out[2])
yield accepted()
# ... and back to the first chain.
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b3 (1) -> b4 (2)
tip(2)
block(5, spend=out[2])
save_spendable_output()
yield rejected()
block(6, spend=out[3])
yield accepted()
# Try to create a fork that double-spends
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b7 (2) -> b8 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(7, spend=out[2])
yield rejected()
block(8, spend=out[4])
yield rejected()
# Try to create a block that has too much fee
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b9 (4)
# \-> b3 (1) -> b4 (2)
tip(6)
block(9, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Create a fork that ends in a block with too much fee (the one that causes the reorg)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b10 (3) -> b11 (4)
# \-> b3 (1) -> b4 (2)
tip(5)
block(10, spend=out[3])
yield rejected()
block(11, spend=out[4], additional_coinbase_value=1)
yield rejected(RejectResult(16, b'bad-cb-amount'))
# Try again, but with a valid fork first
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b14 (5)
# (b12 added last)
# \-> b3 (1) -> b4 (2)
tip(5)
b12 = block(12, spend=out[3])
save_spendable_output()
b13 = block(13, spend=out[4])
# Deliver the block header for b12, and the block b13.
# b13 should be accepted but the tip won't advance until b12 is delivered.
yield TestInstance([[CBlockHeader(b12), None], [b13, False]])
save_spendable_output()
# b14 is invalid, but the node won't know that until it tries to connect
# Tip still can't advance because b12 is missing
block(14, spend=out[5], additional_coinbase_value=1)
yield rejected()
yield TestInstance([[b12, True, b13.sha256]]) # New tip should be b13.
# Add a block with MAX_BLOCK_SIGOPS and one with one more sigop
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b16 (6)
# \-> b3 (1) -> b4 (2)
# Test that a block with a lot of checksigs is okay
lots_of_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS - 1))
tip(13)
block(15, spend=out[5], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
# Test that a block with too many checksigs is rejected
too_many_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
block(16, spend=out[6], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Attempt to spend a transaction created on a different fork
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b17 (b3.vtx[1])
# \-> b3 (1) -> b4 (2)
tip(15)
block(17, spend=txout_b3)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to spend a transaction created on a different fork (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b18 (b3.vtx[1]) -> b19 (6)
# \-> b3 (1) -> b4 (2)
tip(13)
block(18, spend=txout_b3)
yield rejected()
block(19, spend=out[6])
yield rejected()
# Attempt to spend a coinbase at depth too low
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b20 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
block(20, spend=out[7])
yield rejected(RejectResult(16, b'bad-txns-premature-spend-of-coinbase'))
# Attempt to spend a coinbase at depth too low (on a fork this time)
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5)
# \-> b21 (6) -> b22 (5)
# \-> b3 (1) -> b4 (2)
tip(13)
block(21, spend=out[6])
yield rejected()
block(22, spend=out[5])
yield rejected()
# Create a block on either side of MAX_BLOCK_BASE_SIZE and make sure its accepted/rejected
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6)
# \-> b24 (6) -> b25 (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b23 = block(23, spend=out[6])
tx = CTransaction()
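# Note: the constant 69 below appears to approximate this transaction's
# serialized overhead apart from the large script push (version, locktime,
# input and output framing); treat it as an empirical fudge factor.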
script_length = MAX_BLOCK_BASE_SIZE - len(b23.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b23.vtx[1].sha256, 0)))
b23 = update_block(23, [tx])
# Make sure the math above worked out to produce a max-sized block
assert_equal(len(b23.serialize()), MAX_BLOCK_BASE_SIZE)
yield accepted()
save_spendable_output()
# Make the next block one byte bigger and check that it fails
tip(15)
b24 = block(24, spend=out[6])
script_length = MAX_BLOCK_BASE_SIZE - len(b24.serialize()) - 69
script_output = CScript([b'\x00' * (script_length+1)])
tx.vout = [CTxOut(0, script_output)]
b24 = update_block(24, [tx])
assert_equal(len(b24.serialize()), MAX_BLOCK_BASE_SIZE+1)
yield rejected(RejectResult(16, b'bad-blk-length'))
block(25, spend=out[7])
yield rejected()
# Create blocks with a coinbase input script size out of range
# genesis -> b1 (0) -> b2 (1) -> b5 (2) -> b6 (3)
# \-> b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7)
# \-> ... (6) -> ... (7)
# \-> b3 (1) -> b4 (2)
tip(15)
b26 = block(26, spend=out[6])
b26.vtx[0].vin[0].scriptSig = b'\x00'
b26.vtx[0].rehash()
# update_block causes the merkle root to get updated, even with no new
# transactions, and updates the required state.
b26 = update_block(26, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b26 chain to make sure bitcoind isn't accepting b26
b27 = block(27, spend=out[7])
yield rejected(RejectResult(0, b'bad-prevblk'))
# Now try a too-large-coinbase script
tip(15)
b28 = block(28, spend=out[6])
b28.vtx[0].vin[0].scriptSig = b'\x00' * 101
b28.vtx[0].rehash()
b28 = update_block(28, [])
yield rejected(RejectResult(16, b'bad-cb-length'))
# Extend the b28 chain to make sure bitcoind isn't accepting b28
b29 = block(29, spend=out[7])
yield rejected(RejectResult(0, b'bad-prevblk'))
# b30 has a max-sized coinbase scriptSig.
tip(23)
b30 = block(30)
b30.vtx[0].vin[0].scriptSig = b'\x00' * 100
b30.vtx[0].rehash()
b30 = update_block(30, [])
yield accepted()
save_spendable_output()
# b31 - b35 - check sigops of OP_CHECKMULTISIG / OP_CHECKMULTISIGVERIFY / OP_CHECKSIGVERIFY
#
# genesis -> ... -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b36 (11)
# \-> b34 (10)
# \-> b32 (9)
#
# MULTISIG: each op code counts as 20 sigops. To create the edge case, pack another 19 sigops at the end.
lots_of_multisigs = CScript([OP_CHECKMULTISIG] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
b31 = block(31, spend=out[8], script=lots_of_multisigs)
assert_equal(get_legacy_sigopcount_block(b31), MAX_BLOCK_SIGOPS)
yield accepted()
save_spendable_output()
# this goes over the limit because the coinbase has one sigop
too_many_multisigs = CScript([OP_CHECKMULTISIG] * (MAX_BLOCK_SIGOPS // 20))
b32 = block(32, spend=out[9], script=too_many_multisigs)
assert_equal(get_legacy_sigopcount_block(b32), MAX_BLOCK_SIGOPS + 1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKMULTISIGVERIFY
tip(31)
lots_of_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * ((MAX_BLOCK_SIGOPS-1) // 20) + [OP_CHECKSIG] * 19)
block(33, spend=out[9], script=lots_of_multisigs)
yield accepted()
save_spendable_output()
too_many_multisigs = CScript([OP_CHECKMULTISIGVERIFY] * (MAX_BLOCK_SIGOPS // 20))
block(34, spend=out[10], script=too_many_multisigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# CHECKSIGVERIFY
tip(33)
lots_of_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS - 1))
b35 = block(35, spend=out[10], script=lots_of_checksigs)
yield accepted()
save_spendable_output()
too_many_checksigs = CScript([OP_CHECKSIGVERIFY] * (MAX_BLOCK_SIGOPS))
block(36, spend=out[11], script=too_many_checksigs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# Check spending of a transaction in a block which failed to connect
#
# b6 (3)
# b12 (3) -> b13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10)
# \-> b37 (11)
# \-> b38 (11/37)
#
# save 37's spendable output, but then double-spend out11 to invalidate the block
tip(35)
b37 = block(37, spend=out[11])
txout_b37 = PreviousSpendableOutput(b37.vtx[1], 0)
tx = create_and_sign_tx(out[11].tx, out[11].n, 0)
b37 = update_block(37, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# attempt to spend b37's first non-coinbase tx, at which point b37 was still considered valid
tip(35)
block(38, spend=txout_b37)
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Check P2SH SigOp counting
#
#
# 13 (4) -> b15 (5) -> b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b41 (12)
# \-> b40 (12)
#
# b39 - create some P2SH outputs that will require 6 sigops to spend:
#
# redeem_script = COINBASE_PUBKEY, (OP_2DUP+OP_CHECKSIGVERIFY) * 5, OP_CHECKSIG
# p2sh_script = OP_HASH160, ripemd160(sha256(script)), OP_EQUAL
#
tip(35)
b39 = block(39)
b39_outputs = 0
b39_sigops_per_output = 6
# Build the redeem script, hash it, use hash to create the p2sh script
redeem_script = CScript([self.coinbase_pubkey] + [OP_2DUP, OP_CHECKSIGVERIFY]*5 + [OP_CHECKSIG])
redeem_script_hash = hash160(redeem_script)
p2sh_script = CScript([OP_HASH160, redeem_script_hash, OP_EQUAL])
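# Spending this P2SH output runs five OP_CHECKSIGVERIFYs plus one
# OP_CHECKSIG, i.e. the six sigops recorded in b39_sigops_per_output.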
# Create a transaction that spends one satoshi to the p2sh_script, the rest to OP_TRUE
# This must be signed because it is spending a coinbase
spend = out[11]
tx = create_tx(spend.tx, spend.n, 1, p2sh_script)
tx.vout.append(CTxOut(spend.tx.vout[spend.n].nValue - 1, CScript([OP_TRUE])))
self.sign_tx(tx, spend.tx, spend.n)
tx.rehash()
b39 = update_block(39, [tx])
b39_outputs += 1
# Until block is full, add tx's with 1 satoshi to p2sh_script, the rest to OP_TRUE
tx_new = None
tx_last = tx
total_size=len(b39.serialize())
while(total_size < MAX_BLOCK_BASE_SIZE):
tx_new = create_tx(tx_last, 1, 1, p2sh_script)
tx_new.vout.append(CTxOut(tx_last.vout[1].nValue - 1, CScript([OP_TRUE])))
tx_new.rehash()
total_size += len(tx_new.serialize())
if total_size >= MAX_BLOCK_BASE_SIZE:
break
b39.vtx.append(tx_new) # add tx to block
tx_last = tx_new
b39_outputs += 1
b39 = update_block(39, [])
yield accepted()
save_spendable_output()
# Test sigops in P2SH redeem scripts
#
# b40 creates 3333 tx's spending the 6-sigop P2SH outputs from b39 for a total of 19998 sigops.
# The first tx has one sigop and then at the end we add 2 more to put us just over the max.
#
# b41 does the same, less one, so it has the maximum sigops permitted.
#
tip(39)
b40 = block(40, spend=out[12])
sigops = get_legacy_sigopcount_block(b40)
numTxes = (MAX_BLOCK_SIGOPS - sigops) // b39_sigops_per_output
assert_equal(numTxes <= b39_outputs, True)
lastOutpoint = COutPoint(b40.vtx[1].sha256, 0)
new_txs = []
for i in range(1, numTxes+1):
tx = CTransaction()
tx.vout.append(CTxOut(1, CScript([OP_TRUE])))
tx.vin.append(CTxIn(lastOutpoint, b''))
# second input is corresponding P2SH output from b39
tx.vin.append(CTxIn(COutPoint(b39.vtx[i].sha256, 0), b''))
# Note: must pass the redeem_script (not p2sh_script) to the signature hash function
(sighash, err) = SignatureHash(redeem_script, tx, 1, SIGHASH_ALL)
sig = self.coinbase_key.sign(sighash) + bytes(bytearray([SIGHASH_ALL]))
scriptSig = CScript([sig, redeem_script])
tx.vin[1].scriptSig = scriptSig
tx.rehash()
new_txs.append(tx)
lastOutpoint = COutPoint(tx.sha256, 0)
b40_sigops_to_fill = MAX_BLOCK_SIGOPS - (numTxes * b39_sigops_per_output + sigops) + 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b40_sigops_to_fill)))
tx.rehash()
new_txs.append(tx)
update_block(40, new_txs)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# same as b40, but one less sigop
tip(39)
b41 = block(41, spend=None)
update_block(41, b40.vtx[1:-1])
b41_sigops_to_fill = b40_sigops_to_fill - 1
tx = CTransaction()
tx.vin.append(CTxIn(lastOutpoint, b''))
tx.vout.append(CTxOut(1, CScript([OP_CHECKSIG] * b41_sigops_to_fill)))
tx.rehash()
update_block(41, [tx])
yield accepted()
# Fork off of b39 to create a constant base again
#
# b23 (6) -> b30 (7) -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13)
# \-> b41 (12)
#
tip(39)
block(42, spend=out[12])
yield rejected()
save_spendable_output()
block(43, spend=out[13])
yield accepted()
save_spendable_output()
# Test a number of really invalid scenarios
#
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b44 (14)
# \-> ??? (15)
# The next few blocks are going to be created "by hand" since they'll do funky things, such as having
# the first transaction be non-coinbase, etc. The purpose of b44 is to make sure this works.
height = self.block_heights[self.tip.sha256] + 1
coinbase = create_coinbase(height, self.coinbase_pubkey)
b44 = CBlock()
b44.nTime = self.tip.nTime + 1
b44.hashPrevBlock = self.tip.sha256
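# 0x207fffff is the compact encoding of the regtest proof-of-work limit,
# i.e. minimal difficulty.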
b44.nBits = 0x207fffff
b44.vtx.append(coinbase)
b44.hashMerkleRoot = b44.calc_merkle_root()
b44.solve()
self.tip = b44
self.block_heights[b44.sha256] = height
self.blocks[44] = b44
yield accepted()
# A block with a non-coinbase as the first tx
non_coinbase = create_tx(out[15].tx, out[15].n, 1)
b45 = CBlock()
b45.nTime = self.tip.nTime + 1
b45.hashPrevBlock = self.tip.sha256
b45.nBits = 0x207fffff
b45.vtx.append(non_coinbase)
b45.hashMerkleRoot = b45.calc_merkle_root()
b45.calc_sha256()
b45.solve()
self.block_heights[b45.sha256] = self.block_heights[self.tip.sha256]+1
self.tip = b45
self.blocks[45] = b45
yield rejected(RejectResult(16, b'bad-cb-missing'))
# A block with no txns
tip(44)
b46 = CBlock()
b46.nTime = b44.nTime+1
b46.hashPrevBlock = b44.sha256
b46.nBits = 0x207fffff
b46.vtx = []
b46.hashMerkleRoot = 0
b46.solve()
self.block_heights[b46.sha256] = self.block_heights[b44.sha256]+1
self.tip = b46
assert 46 not in self.blocks
self.blocks[46] = b46
s = ser_uint256(b46.hashMerkleRoot)
yield rejected(RejectResult(16, b'bad-blk-length'))
# A block with invalid work
tip(44)
b47 = block(47, solve=False)
target = uint256_from_compact(b47.nBits)
while b47.scrypt256 < target: #changed > to <
b47.nNonce += 1
b47.rehash()
yield rejected(RejectResult(16, b'high-hash'))
# A block with timestamp > 2 hrs in the future
tip(44)
b48 = block(48, solve=False)
b48.nTime = int(time.time()) + 60 * 60 * 3
b48.solve()
yield rejected(RejectResult(16, b'time-too-new'))
# A block with an invalid merkle hash
tip(44)
b49 = block(49)
b49.hashMerkleRoot += 1
b49.solve()
yield rejected(RejectResult(16, b'bad-txnmrklroot'))
# A block with an incorrect POW limit
tip(44)
b50 = block(50)
b50.nBits = b50.nBits - 1
b50.solve()
yield rejected(RejectResult(16, b'bad-diffbits'))
# A block with two coinbase txns
tip(44)
b51 = block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = update_block(51, [cb2])
yield rejected(RejectResult(16, b'bad-cb-multiple'))
# A block w/ duplicate txns
# Note: txns have to be in the right position in the merkle tree to trigger this error
tip(44)
b52 = block(52, spend=out[15])
tx = create_tx(b52.vtx[1], 0, 1)
b52 = update_block(52, [tx, tx])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# Test block timestamps
# -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15)
# \-> b54 (15)
#
tip(43)
block(53, spend=out[14])
yield rejected() # rejected since b44 is at same height
save_spendable_output()
# invalid timestamp (b35 is 5 blocks back, so its time is MedianTimePast)
b54 = block(54, spend=out[15])
b54.nTime = b35.nTime - 1
b54.solve()
yield rejected(RejectResult(16, b'time-too-old'))
# valid timestamp
tip(53)
b55 = block(55, spend=out[15])
b55.nTime = b35.nTime
update_block(55, [])
yield accepted()
save_spendable_output()
# Test CVE-2012-2459
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16)
# \-> b57 (16)
# \-> b56p2 (16)
# \-> b56 (16)
#
# Merkle tree malleability (CVE-2012-2459): repeating a sequence of transactions in a block
# leaves the merkle root unchanged while still invalidating the block.
# See: src/consensus/merkle.h
#
# b57 has three txns: coinbase, tx, tx1. The merkle root computation will duplicate tx.
# Result: OK
#
# b56 copies b57 but duplicates tx1 and does not recalculate the block hash. So it has a valid merkle
# root but duplicate transactions.
# Result: Fails
#
# b57p2 has six transactions in its merkle tree:
# - coinbase, tx, tx1, tx2, tx3, tx4
# Merkle root calculation will duplicate as necessary.
# Result: OK.
#
# b56p2 copies b57p2 but adds both tx3 and tx4. The purpose of the test is to make sure the code catches
# duplicate txns that are not next to one another with the "bad-txns-duplicate" error (which indicates
# that the error was caught early, avoiding a DOS vulnerability).
# b57 - a good block with 2 txs, don't submit until end
tip(55)
b57 = block(57)
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
b57 = update_block(57, [tx, tx1])
# b56 - copy b57, add a duplicate tx
tip(55)
b56 = copy.deepcopy(b57)
self.blocks[56] = b56
assert_equal(len(b56.vtx),3)
b56 = update_block(56, [tx1])
assert_equal(b56.hash, b57.hash)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
# b57p2 - a good block with 6 tx'es, don't submit until end
tip(55)
b57p2 = block("57p2")
tx = create_and_sign_tx(out[16].tx, out[16].n, 1)
tx1 = create_tx(tx, 0, 1)
tx2 = create_tx(tx1, 0, 1)
tx3 = create_tx(tx2, 0, 1)
tx4 = create_tx(tx3, 0, 1)
b57p2 = update_block("57p2", [tx, tx1, tx2, tx3, tx4])
# b56p2 - copy b57p2, duplicate two non-consecutive tx's
tip(55)
b56p2 = copy.deepcopy(b57p2)
self.blocks["b56p2"] = b56p2
assert_equal(b56p2.hash, b57p2.hash)
assert_equal(len(b56p2.vtx),6)
b56p2 = update_block("b56p2", [tx3, tx4])
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip("57p2")
yield accepted()
tip(57)
yield rejected() #rejected because 57p2 seen first
save_spendable_output()
# Test a few invalid tx types
#
# -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> ??? (17)
#
# tx with prevout.n out of range
tip(57)
b58 = block(58, spend=out[17])
tx = CTransaction()
assert(len(out[17].tx.vout) < 42)
tx.vin.append(CTxIn(COutPoint(out[17].tx.sha256, 42), CScript([OP_TRUE]), 0xffffffff))
tx.vout.append(CTxOut(0, b""))
tx.calc_sha256()
b58 = update_block(58, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# tx with output value > input value out of range
tip(57)
b59 = block(59)
tx = create_and_sign_tx(out[17].tx, out[17].n, 51*COIN)
b59 = update_block(59, [tx])
yield rejected(RejectResult(16, b'bad-txns-in-belowout'))
# reset to good chain
tip(57)
b60 = block(60, spend=out[17])
yield accepted()
save_spendable_output()
# Test BIP30
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b61 (18)
#
# Blocks are not allowed to contain a transaction whose id matches that of an earlier,
# not-fully-spent transaction in the same chain. To test, make identical coinbases;
# the second one should be rejected.
#
tip(60)
b61 = block(61, spend=out[18])
b61.vtx[0].vin[0].scriptSig = b60.vtx[0].vin[0].scriptSig #equalize the coinbases
b61.vtx[0].rehash()
b61 = update_block(61, [])
assert_equal(b60.vtx[0].serialize(), b61.vtx[0].serialize())
yield rejected(RejectResult(16, b'bad-txns-BIP30'))
# Test tx.isFinal is properly rejected (not an exhaustive tx.isFinal test, that should be in data-driven transaction tests)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b62 (18)
#
tip(60)
b62 = block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff #this locktime is non-final
assert(out[18].n < len(out[18].tx.vout))
tx.vin.append(CTxIn(COutPoint(out[18].tx.sha256, out[18].n))) # don't set nSequence
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
assert(tx.vin[0].nSequence < 0xffffffff)
tx.calc_sha256()
b62 = update_block(62, [tx])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# Test a non-final coinbase is also rejected
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17)
# \-> b63 (-)
#
tip(60)
b63 = block(63)
b63.vtx[0].nLockTime = 0xffffffff
b63.vtx[0].vin[0].nSequence = 0xDEADBEEF
b63.vtx[0].rehash()
b63 = update_block(63, [])
yield rejected(RejectResult(16, b'bad-txns-nonfinal'))
# This checks that a block with a bloated VARINT between the block_header and the array of tx such that
# the block is > MAX_BLOCK_BASE_SIZE with the bloated varint, but <= MAX_BLOCK_BASE_SIZE without the bloated varint,
# does not cause a subsequent, identical block with canonical encoding to be rejected. The test does not
# care whether the bloated block is accepted or rejected; it only cares that the second block is accepted.
#
# What matters is that the receiving node should not reject the bloated block, and then reject the canonical
# block on the basis that it's the same as an already-rejected block (which would be a consensus failure.)
#
# -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18)
# \
# b64a (18)
# b64a is a bloated block (non-canonical varint)
# b64 is a good block (same as b64a but w/ canonical varint)
#
tip(60)
regular_block = block("64a", spend=out[18])
# make it a "broken block" with non-canonical serialization
b64a = CBrokenBlock(regular_block)
b64a.initialize(regular_block)
self.blocks["64a"] = b64a
self.tip = b64a
tx = CTransaction()
# use canonical serialization to calculate size
script_length = MAX_BLOCK_BASE_SIZE - len(b64a.normal_serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b64a.vtx[1].sha256, 0)))
b64a = update_block("64a", [tx])
assert_equal(len(b64a.serialize()), MAX_BLOCK_BASE_SIZE + 8)
yield TestInstance([[self.tip, None]])
# comptool workaround: to make sure b64 is delivered, manually erase b64a from blockstore
self.test.block_store.erase(b64a.sha256)
tip(60)
b64 = CBlock(b64a)
b64.vtx = copy.deepcopy(b64a.vtx)
assert_equal(b64.hash, b64a.hash)
assert_equal(len(b64.serialize()), MAX_BLOCK_BASE_SIZE)
self.blocks[64] = b64
update_block(64, [])
yield accepted()
save_spendable_output()
# Spend an output created in the block itself
#
# -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
#
tip(64)
b65 = block(65)
tx1 = create_and_sign_tx(out[19].tx, out[19].n, out[19].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 0)
update_block(65, [tx1, tx2])
yield accepted()
save_spendable_output()
# Attempt to spend an output created later in the same block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b66 (20)
tip(65)
b66 = block(66)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
update_block(66, [tx2, tx1])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Attempt to double-spend a transaction created in a block
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19)
# \-> b67 (20)
#
#
tip(65)
b67 = block(67)
tx1 = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue)
tx2 = create_and_sign_tx(tx1, 0, 1)
tx3 = create_and_sign_tx(tx1, 0, 2)
update_block(67, [tx1, tx2, tx3])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# More tests of block subsidy
#
# -> b43 (13) -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b68 (20)
#
# b68 - coinbase with an extra 10 satoshis,
# creates a tx that has 9 satoshis from out[20] go to fees
# this fails because the coinbase is trying to claim 1 satoshi too much in fees
#
# b69 - coinbase with extra 10 satoshis, and a tx that gives a 10 satoshi fee
# this succeeds
#
tip(65)
b68 = block(68, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-9)
update_block(68, [tx])
yield rejected(RejectResult(16, b'bad-cb-amount'))
tip(65)
b69 = block(69, additional_coinbase_value=10)
tx = create_and_sign_tx(out[20].tx, out[20].n, out[20].tx.vout[0].nValue-10)
update_block(69, [tx])
yield accepted()
save_spendable_output()
# Test spending the outpoint of a non-existent transaction
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20)
# \-> b70 (21)
#
tip(69)
block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(bogus_tx.sha256, 0), b"", 0xffffffff))
tx.vout.append(CTxOut(1, b""))
update_block(70, [tx])
yield rejected(RejectResult(16, b'bad-txns-inputs-missingorspent'))
# Test accepting an invalid block which has the same hash as a valid one (via merkle tree tricks)
#
# -> b53 (14) -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b71 (21)
#
# b72 is a good block.
# b71 is a copy of b72, but re-adds one of its transactions. However, it has the same hash as b72.
#
tip(69)
b72 = block(72)
tx1 = create_and_sign_tx(out[21].tx, out[21].n, 2)
tx2 = create_and_sign_tx(tx1, 0, 1)
b72 = update_block(72, [tx1, tx2]) # now tip is 72
b71 = copy.deepcopy(b72)
b71.vtx.append(tx2) # add duplicate tx2
self.block_heights[b71.sha256] = self.block_heights[b69.sha256] + 1 # b71 builds off b69
self.blocks[71] = b71
assert_equal(len(b71.vtx), 4)
assert_equal(len(b72.vtx), 3)
assert_equal(b72.sha256, b71.sha256)
tip(71)
yield rejected(RejectResult(16, b'bad-txns-duplicate'))
tip(72)
yield accepted()
save_spendable_output()
# Test some invalid scripts and MAX_BLOCK_SIGOPS
#
# -> b55 (15) -> b57 (16) -> b60 (17) -> b64 (18) -> b65 (19) -> b69 (20) -> b72 (21)
# \-> b** (22)
#
# b73 - tx with excessive sigops that are placed after an excessively large script element.
# The purpose of the test is to make sure those sigops are counted.
#
# script is a bytearray of size 20,526
#
# bytearray[0-19,998] : OP_CHECKSIG
# bytearray[19,999] : OP_PUSHDATA4
# bytearray[20,000-20,003]: 521 (max_script_element_size+1, in little-endian format)
# bytearray[20,004-20,525]: unread data (script_element)
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
#
tip(72)
b73 = block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e",16) # OP_PUSHDATA4
element_size = MAX_SCRIPT_ELEMENT_SIZE + 1
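# element_size is 521 (= 0x0209); stored little-endian below as the four
# bytes 0x09 0x02 0x00 0x00.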
a[MAX_BLOCK_SIGOPS] = element_size % 256
a[MAX_BLOCK_SIGOPS+1] = element_size // 256
a[MAX_BLOCK_SIGOPS+2] = 0
a[MAX_BLOCK_SIGOPS+3] = 0
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b73 = update_block(73, [tx])
assert_equal(get_legacy_sigopcount_block(b73), MAX_BLOCK_SIGOPS+1)
yield rejected(RejectResult(16, b'bad-blk-sigops'))
# b74/75 - if we push an invalid script element, all previous sigops are counted,
# but sigops after the element are not counted.
#
# The invalid script element is that the push_data indicates that
# there will be a large amount of data (0xffffff bytes), but we only
# provide a much smaller number. These bytes are CHECKSIGS so they would
# cause b75 to fail for excessive sigops, if those bytes were counted.
#
# b74 fails because we put MAX_BLOCK_SIGOPS+1 before the element
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
#
#
tip(72)
b74 = block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
a[MAX_BLOCK_SIGOPS+1] = 0xfe
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
a[MAX_BLOCK_SIGOPS+4] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b74 = update_block(74, [tx])
yield rejected(RejectResult(16, b'bad-blk-sigops'))
tip(72)
b75 = block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e
a[MAX_BLOCK_SIGOPS] = 0xff
a[MAX_BLOCK_SIGOPS+1] = 0xff
a[MAX_BLOCK_SIGOPS+2] = 0xff
a[MAX_BLOCK_SIGOPS+3] = 0xff
tx = create_and_sign_tx(out[22].tx, 0, 1, CScript(a))
b75 = update_block(75, [tx])
yield accepted()
save_spendable_output()
# Check that if we push an element filled with CHECKSIGs, they are not counted
tip(75)
b76 = block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS-1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
tx = create_and_sign_tx(out[23].tx, 0, 1, CScript(a))
b76 = update_block(76, [tx])
yield accepted()
save_spendable_output()
# Test transaction resurrection
#
# -> b77 (24) -> b78 (25) -> b79 (26)
# \-> b80 (25) -> b81 (26) -> b82 (27)
#
# b78 creates a tx, which is spent in b79. After b82, both should be in mempool
#
# The tx'es must be unsigned and pass the node's mempool policy. It is unsigned for the
# rather obscure reason that the Python signature code does not distinguish between
# Low-S and High-S values (whereas the bitcoin code has custom code which does so);
# as a result of which, the odds are 50% that the python code will use the right
# value and the transaction will be accepted into the mempool. Until we modify the
# test framework to support low-S signing, we are out of luck.
#
# To get around this issue, we construct transactions which are not signed and which
# spend to OP_TRUE. If the standard-ness rules change, this test would need to be
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
#
tip(76)
block(77)
tx77 = create_and_sign_tx(out[24].tx, out[24].n, 10*COIN)
update_block(77, [tx77])
yield accepted()
save_spendable_output()
block(78)
tx78 = create_tx(tx77, 0, 9*COIN)
update_block(78, [tx78])
yield accepted()
block(79)
tx79 = create_tx(tx78, 0, 8*COIN)
update_block(79, [tx79])
yield accepted()
# mempool should be empty
assert_equal(len(self.nodes[0].getrawmempool()), 0)
tip(77)
block(80, spend=out[25])
yield rejected()
save_spendable_output()
block(81, spend=out[26])
yield rejected() # other chain is same length
save_spendable_output()
block(82, spend=out[27])
yield accepted() # now this chain is longer, triggers re-org
save_spendable_output()
# now check that tx78 and tx79 have been put back into the peer's mempool
mempool = self.nodes[0].getrawmempool()
assert_equal(len(mempool), 2)
assert(tx78.hash in mempool)
assert(tx79.hash in mempool)
# Test invalid opcodes in dead execution paths.
#
# -> b81 (26) -> b82 (27) -> b83 (28)
#
b83 = block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = create_and_sign_tx(out[28].tx, out[28].n, out[28].tx.vout[0].nValue, script)
tx2 = create_and_sign_tx(tx1, 0, 0, CScript([OP_TRUE]))
tx2.vin[0].scriptSig = CScript([OP_FALSE])
tx2.rehash()
update_block(83, [tx1, tx2])
yield accepted()
save_spendable_output()
# Reorg on/off blocks that have OP_RETURN in them (and try to spend them)
#
# -> b81 (26) -> b82 (27) -> b83 (28) -> b84 (29) -> b87 (30) -> b88 (31)
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
#
b84 = block(84)
tx1 = create_tx(out[29].tx, out[29].n, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.calc_sha256()
self.sign_tx(tx1, out[29].tx, out[29].n)
tx1.rehash()
tx2 = create_tx(tx1, 1, 0, CScript([OP_RETURN]))
tx2.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx3 = create_tx(tx1, 2, 0, CScript([OP_RETURN]))
tx3.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx4 = create_tx(tx1, 3, 0, CScript([OP_TRUE]))
tx4.vout.append(CTxOut(0, CScript([OP_RETURN])))
tx5 = create_tx(tx1, 4, 0, CScript([OP_RETURN]))
update_block(84, [tx1,tx2,tx3,tx4,tx5])
yield accepted()
save_spendable_output()
tip(83)
block(85, spend=out[29])
yield rejected()
block(86, spend=out[30])
yield accepted()
tip(84)
block(87, spend=out[30])
yield rejected()
save_spendable_output()
block(88, spend=out[31])
yield accepted()
save_spendable_output()
# trying to spend the OP_RETURN output is rejected
block("89a", spend=out[32])
tx = create_tx(tx1, 0, 0, CScript([OP_TRUE]))
update_block("89a", [tx])
yield rejected()
# Test re-org of a week's worth of blocks (1088 blocks)
# This test takes a minute or two and can be accomplished in memory
#
if self.options.runbarelyexpensive:
tip(88)
LARGE_REORG_SIZE = 1088
test1 = TestInstance(sync_every_block=False)
spend=out[32]
for i in range(89, LARGE_REORG_SIZE + 89):
b = block(i, spend)
tx = CTransaction()
script_length = MAX_BLOCK_BASE_SIZE - len(b.serialize()) - 69
script_output = CScript([b'\x00' * script_length])
tx.vout.append(CTxOut(0, script_output))
tx.vin.append(CTxIn(COutPoint(b.vtx[1].sha256, 0)))
b = update_block(i, [tx])
assert_equal(len(b.serialize()), MAX_BLOCK_BASE_SIZE)
test1.blocks_and_transactions.append([self.tip, True])
save_spendable_output()
spend = get_spendable_output()
yield test1
chain1_tip = i
# now create alt chain of same length
tip(88)
test2 = TestInstance(sync_every_block=False)
for i in range(89, LARGE_REORG_SIZE + 89):
block("alt"+str(i))
test2.blocks_and_transactions.append([self.tip, False])
yield test2
# extend alt chain to trigger re-org
block("alt" + str(chain1_tip + 1))
yield accepted()
# ... and re-org back to the first chain
tip(chain1_tip)
block(chain1_tip + 1)
yield rejected()
block(chain1_tip + 2)
yield accepted()
chain1_tip += 2
if __name__ == '__main__':
FullBlockTest().main()
| mit |
Distrotech/intellij-community | python/lib/Lib/encodings/unicode_internal.py | 827 | 1196 | """ Python 'unicode-internal' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_internal_encode
decode = codecs.unicode_internal_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_internal_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_internal_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-internal',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
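# Illustrative usage, as a sketch (assumes the codec is registered under this
# name, as in the standard library; the codec is deprecated in modern Python 3):
#   u"abc".encode("unicode-internal")  # bytes of the interpreter's internal form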
| apache-2.0 |
watermelo/libcloud | libcloud/backup/drivers/dummy.py | 31 | 1460 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from libcloud.backup.base import BackupDriver
class DummyBackupDriver(BackupDriver):
"""
Dummy Backup driver.
>>> from libcloud.backup.drivers.dummy import DummyBackupDriver
>>> driver = DummyBackupDriver('key', 'secret')
>>> driver.name
'Dummy Backup Provider'
"""
name = 'Dummy Backup Provider'
website = 'http://example.com'
def __init__(self, api_key, api_secret):
"""
:param api_key: API key or username to be used (required)
:type api_key: ``str``
:param api_secret: Secret password to be used (required)
:type api_secret: ``str``
:rtype: ``None``
"""
| apache-2.0 |
pyramania/scipy | benchmarks/benchmarks/linalg.py | 41 | 4330 | from __future__ import division, absolute_import, print_function
import math
import numpy.linalg as nl
import numpy as np
from numpy.testing import assert_
from numpy.random import rand
try:
import scipy.linalg as sl
except ImportError:
pass
from .common import Benchmark
def random(size):
return rand(*size)
class Bench(Benchmark):
params = [
[20, 100, 500, 1000],
['contig', 'nocont'],
['numpy', 'scipy']
]
param_names = ['size', 'contiguous', 'module']
def setup(self, size, contig, module):
a = random([size,size])
# larger diagonal ensures non-singularity:
for i in range(size):
a[i,i] = 10*(.1+a[i,i])
b = random([size])
if contig != 'contig':
a = a[-1::-1,-1::-1] # turn into a non-contiguous array
assert_(not a.flags['CONTIGUOUS'])
self.a = a
self.b = b
def time_solve(self, size, contig, module):
if module == 'numpy':
nl.solve(self.a, self.b)
else:
sl.solve(self.a, self.b)
def time_inv(self, size, contig, module):
if module == 'numpy':
nl.inv(self.a)
else:
sl.inv(self.a)
def time_det(self, size, contig, module):
if module == 'numpy':
nl.det(self.a)
else:
sl.det(self.a)
def time_eigvals(self, size, contig, module):
if module == 'numpy':
nl.eigvals(self.a)
else:
sl.eigvals(self.a)
def time_svd(self, size, contig, module):
if module == 'numpy':
nl.svd(self.a)
else:
sl.svd(self.a)
class Norm(Benchmark):
params = [
[(20, 20), (100, 100), (1000, 1000), (20, 1000), (1000, 20)],
['contig', 'nocont'],
['numpy', 'scipy']
]
param_names = ['shape', 'contiguous', 'module']
def setup(self, shape, contig, module):
a = np.random.randn(*shape)
if contig != 'contig':
a = a[-1::-1,-1::-1] # turn into a non-contiguous array
assert_(not a.flags['CONTIGUOUS'])
self.a = a
def time_1_norm(self, size, contig, module):
if module == 'numpy':
nl.norm(self.a, ord=1)
else:
sl.norm(self.a, ord=1)
def time_inf_norm(self, size, contig, module):
if module == 'numpy':
nl.norm(self.a, ord=np.inf)
else:
sl.norm(self.a, ord=np.inf)
def time_frobenius_norm(self, size, contig, module):
if module == 'numpy':
nl.norm(self.a)
else:
sl.norm(self.a)
class Lstsq(Benchmark):
"""
Test the speed of four least-squares solvers on not full rank matrices.
Also check the difference in the solutions.
The matrix has the size ``(m, 2/3*m)``; the rank is ``1/2 * m``.
Matrix values are random in the range (-5, 5); the same holds for the right
hand side. The complex matrix is the sum of a real and an imaginary matrix.
"""
param_names = ['dtype', 'size', 'driver']
params = [
[np.float64, np.complex128],
[10, 100, 1000],
['gelss', 'gelsy', 'gelsd', 'numpy'],
]
def setup(self, dtype, size, lapack_driver):
np.random.seed(1234)
n = math.ceil(2./3. * size)
k = math.ceil(1./2. * size)
m = size
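# A is constructed as an (m x k) @ (k x n) product below, so its rank is at
# most k (about half of `size`), making the system rank-deficient.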
if dtype is np.complex128:
A = ((10 * np.random.rand(m,k) - 5) +
1j*(10 * np.random.rand(m,k) - 5))
temp = ((10 * np.random.rand(k,n) - 5) +
1j*(10 * np.random.rand(k,n) - 5))
b = ((10 * np.random.rand(m,1) - 5) +
1j*(10 * np.random.rand(m,1) - 5))
else:
A = (10 * np.random.rand(m,k) - 5)
temp = 10 * np.random.rand(k,n) - 5
b = 10 * np.random.rand(m,1) - 5
self.A = A.dot(temp)
self.b = b
def time_lstsq(self, dtype, size, lapack_driver):
if lapack_driver == 'numpy':
np.linalg.lstsq(self.A, self.b,
rcond=np.finfo(self.A.dtype).eps * 100)
else:
sl.lstsq(self.A, self.b, cond=None, overwrite_a=False,
overwrite_b=False, check_finite=False,
lapack_driver=lapack_driver)
| bsd-3-clause |
JuliBakagianni/CEF-ELRC | metashare/repository/templatetags/email_protection.py | 6 | 2257 | from django import template
from random import randrange, shuffle
register = template.Library()
class EncryptEmail(template.Node):
"""
Template tag that allows to obfuscate email addresses in page templates.
Based on http://djangosnippets.org/snippets/1907/
"""
def __init__(self, context_var):
"""
Initialises this template tag.
"""
super(EncryptEmail, self).__init__()
self.context_var = template.Variable(context_var)
def render(self, context):
"""
Renders a given email address as obfuscated JavaScript code.
"""
email_address = self.context_var.resolve(context)
# Clean up the white space in the email address, if any
email_address = email_address.strip()
email_id = 'e' + str(randrange(1, 999999999))
character_set = '+-.0123456789@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghi' \
'jklmnopqrstuvwxyz'
character_list = list(character_set)
shuffle(character_list)
key = ''.join(character_list)
crypted = ''.join([key[character_set.find(c)] for c in email_address])
# Create JavaScript-based, obfuscated email address representation.
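# The embedded JavaScript inverts the substitution cipher: sorting the
# shuffled key reproduces character_set (whose characters are in ascending
# ASCII order), so b.charAt(a.indexOf(ch)) maps each obfuscated character
# back to the original address.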
script = 'var a="{}";var b=a.split("").sort().join("");var c="{}";' \
'var d="";for(var e=0;e<c.length;e++)d+=b.charAt(a.indexOf(c.cha' \
'rAt(e)));document.getElementById("{}").innerHTML="<a href=\\"ma' \
'ilto:"+d+"\\">"+d+"</a>"'.format(key, crypted, email_id)
script = script.replace("\\","\\\\").replace('"','\\"')
obfuscated = '<span id="{}">[javascript protected email address]</' \
'span><script type="text/javascript">/*<![CDATA[*/eval("{}")/*]]' \
'>*/</script>'.format(email_id, script)
return obfuscated
def encrypt_email(parser, token):
"""
Use it like this: {% encrypt_email user.email %}
"""
tokens = token.contents.split()
if len(tokens) != 2:
_msg = "%r tag accepts exactly two arguments" % tokens[0]
raise template.TemplateSyntaxError(_msg)
return EncryptEmail(tokens[1])
register.tag('encrypt_email', encrypt_email) | bsd-3-clause |
Microvellum/Fluid-Designer | win64-vc/2.78/python/lib/test/test_importlib/test_api.py | 8 | 17783 | from . import util as test_util
init = test_util.import_importlib('importlib')
util = test_util.import_importlib('importlib.util')
machinery = test_util.import_importlib('importlib.machinery')
import os.path
import sys
from test import support
import types
import unittest
import warnings
class ImportModuleTests:
"""Test importlib.import_module."""
def test_module_import(self):
# Test importing a top-level module.
with test_util.mock_modules('top_level') as mock:
with test_util.import_state(meta_path=[mock]):
module = self.init.import_module('top_level')
self.assertEqual(module.__name__, 'top_level')
def test_absolute_package_import(self):
# Test importing a module from a package with an absolute name.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
name = '{0}.mod'.format(pkg_name)
with test_util.mock_modules(pkg_long_name, name) as mock:
with test_util.import_state(meta_path=[mock]):
module = self.init.import_module(name)
self.assertEqual(module.__name__, name)
def test_shallow_relative_package_import(self):
# Test importing a module from a package through a relative import.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
module_name = 'mod'
absolute_name = '{0}.{1}'.format(pkg_name, module_name)
relative_name = '.{0}'.format(module_name)
with test_util.mock_modules(pkg_long_name, absolute_name) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module(pkg_name)
module = self.init.import_module(relative_name, pkg_name)
self.assertEqual(module.__name__, absolute_name)
def test_deep_relative_package_import(self):
modules = ['a.__init__', 'a.b.__init__', 'a.c']
with test_util.mock_modules(*modules) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module('a')
self.init.import_module('a.b')
module = self.init.import_module('..c', 'a.b')
self.assertEqual(module.__name__, 'a.c')
def test_absolute_import_with_package(self):
# Test importing a module from a package with an absolute name with
# the 'package' argument given.
pkg_name = 'pkg'
pkg_long_name = '{0}.__init__'.format(pkg_name)
name = '{0}.mod'.format(pkg_name)
with test_util.mock_modules(pkg_long_name, name) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module(pkg_name)
module = self.init.import_module(name, pkg_name)
self.assertEqual(module.__name__, name)
def test_relative_import_wo_package(self):
# Relative imports cannot happen without the 'package' argument being
# set.
with self.assertRaises(TypeError):
self.init.import_module('.support')
def test_loaded_once(self):
# Issue #13591: Modules should only be loaded once when
# initializing the parent package attempts to import the
# module currently being imported.
b_load_count = 0
def load_a():
self.init.import_module('a.b')
def load_b():
nonlocal b_load_count
b_load_count += 1
code = {'a': load_a, 'a.b': load_b}
modules = ['a.__init__', 'a.b']
with test_util.mock_modules(*modules, module_code=code) as mock:
with test_util.import_state(meta_path=[mock]):
self.init.import_module('a.b')
self.assertEqual(b_load_count, 1)
(Frozen_ImportModuleTests,
Source_ImportModuleTests
) = test_util.test_both(ImportModuleTests, init=init)
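# Hedged note (illustrative, not part of the original module): test_both() is
# assumed to stamp the mixin above into the two concrete TestCase classes
# unpacked here -- one bound to the frozen importlib, one to the pure-Python
# source implementation -- so every test runs against both copies.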
class FindLoaderTests:
class FakeMetaFinder:
@staticmethod
def find_module(name, path=None): return name, path
def test_sys_modules(self):
# If a module with __loader__ is in sys.modules, then return it.
name = 'some_mod'
with test_util.uncache(name):
module = types.ModuleType(name)
loader = 'a loader!'
module.__loader__ = loader
sys.modules[name] = module
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
found = self.init.find_loader(name)
self.assertEqual(loader, found)
def test_sys_modules_loader_is_None(self):
# If sys.modules[name].__loader__ is None, raise ValueError.
name = 'some_mod'
with test_util.uncache(name):
module = types.ModuleType(name)
module.__loader__ = None
sys.modules[name] = module
with self.assertRaises(ValueError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.init.find_loader(name)
def test_sys_modules_loader_is_not_set(self):
# Should raise ValueError
# Issue #17099
name = 'some_mod'
with test_util.uncache(name):
module = types.ModuleType(name)
try:
del module.__loader__
except AttributeError:
pass
sys.modules[name] = module
with self.assertRaises(ValueError):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.init.find_loader(name)
def test_success(self):
# Return the loader found on sys.meta_path.
name = 'some_mod'
with test_util.uncache(name):
with test_util.import_state(meta_path=[self.FakeMetaFinder]):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual((name, None), self.init.find_loader(name))
def test_success_path(self):
# Searching on a path should work.
name = 'some_mod'
path = 'path to some place'
with test_util.uncache(name):
with test_util.import_state(meta_path=[self.FakeMetaFinder]):
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertEqual((name, path),
self.init.find_loader(name, path))
def test_nothing(self):
# None is returned upon failure to find a loader.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
self.assertIsNone(self.init.find_loader('nevergoingtofindthismodule'))
(Frozen_FindLoaderTests,
Source_FindLoaderTests
) = test_util.test_both(FindLoaderTests, init=init)
class ReloadTests:
"""Test module reloading for builtin and extension modules."""
def test_reload_modules(self):
for mod in ('tokenize', 'time', 'marshal'):
with self.subTest(module=mod):
with support.CleanImport(mod):
module = self.init.import_module(mod)
self.init.reload(module)
def test_module_replaced(self):
def code():
import sys
module = type(sys)('top_level')
module.spam = 3
sys.modules['top_level'] = module
mock = test_util.mock_modules('top_level',
module_code={'top_level': code})
with mock:
with test_util.import_state(meta_path=[mock]):
module = self.init.import_module('top_level')
reloaded = self.init.reload(module)
actual = sys.modules['top_level']
self.assertEqual(actual.spam, 3)
self.assertEqual(reloaded.spam, 3)
def test_reload_missing_loader(self):
with support.CleanImport('types'):
import types
loader = types.__loader__
del types.__loader__
reloaded = self.init.reload(types)
self.assertIs(reloaded, types)
self.assertIs(sys.modules['types'], types)
self.assertEqual(reloaded.__loader__.path, loader.path)
def test_reload_loader_replaced(self):
with support.CleanImport('types'):
import types
types.__loader__ = None
self.init.invalidate_caches()
reloaded = self.init.reload(types)
self.assertIsNot(reloaded.__loader__, None)
self.assertIs(reloaded, types)
self.assertIs(sys.modules['types'], types)
def test_reload_location_changed(self):
name = 'spam'
with support.temp_cwd(None) as cwd:
with test_util.uncache('spam'):
with support.DirsOnSysPath(cwd):
# Start as a plain module.
self.init.invalidate_caches()
path = os.path.join(cwd, name + '.py')
cached = self.util.cache_from_source(path)
expected = {'__name__': name,
'__package__': '',
'__file__': path,
'__cached__': cached,
'__doc__': None,
}
support.create_empty_file(path)
module = self.init.import_module(name)
ns = vars(module).copy()
loader = ns.pop('__loader__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertEqual(spec.loader, loader)
self.assertEqual(loader.path, path)
self.assertEqual(ns, expected)
# Change to a package.
self.init.invalidate_caches()
init_path = os.path.join(cwd, name, '__init__.py')
cached = self.util.cache_from_source(init_path)
expected = {'__name__': name,
'__package__': name,
'__file__': init_path,
'__cached__': cached,
'__path__': [os.path.dirname(init_path)],
'__doc__': None,
}
os.mkdir(name)
os.rename(path, init_path)
reloaded = self.init.reload(module)
ns = vars(reloaded).copy()
loader = ns.pop('__loader__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertEqual(spec.loader, loader)
self.assertIs(reloaded, module)
self.assertEqual(loader.path, init_path)
self.maxDiff = None
self.assertEqual(ns, expected)
def test_reload_namespace_changed(self):
name = 'spam'
with support.temp_cwd(None) as cwd:
with test_util.uncache('spam'):
with support.DirsOnSysPath(cwd):
# Start as a namespace package.
self.init.invalidate_caches()
bad_path = os.path.join(cwd, name, '__init.py')
cached = self.util.cache_from_source(bad_path)
expected = {'__name__': name,
'__package__': name,
'__doc__': None,
}
os.mkdir(name)
with open(bad_path, 'w') as init_file:
init_file.write('eggs = None')
module = self.init.import_module(name)
ns = vars(module).copy()
loader = ns.pop('__loader__')
path = ns.pop('__path__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertIs(spec.loader, None)
self.assertIsNot(loader, None)
self.assertEqual(set(path),
set([os.path.dirname(bad_path)]))
with self.assertRaises(AttributeError):
# a NamespaceLoader
loader.path
self.assertEqual(ns, expected)
# Change to a regular package.
self.init.invalidate_caches()
init_path = os.path.join(cwd, name, '__init__.py')
cached = self.util.cache_from_source(init_path)
expected = {'__name__': name,
'__package__': name,
'__file__': init_path,
'__cached__': cached,
'__path__': [os.path.dirname(init_path)],
'__doc__': None,
'eggs': None,
}
os.rename(bad_path, init_path)
reloaded = self.init.reload(module)
ns = vars(reloaded).copy()
loader = ns.pop('__loader__')
spec = ns.pop('__spec__')
ns.pop('__builtins__', None) # An implementation detail.
self.assertEqual(spec.name, name)
self.assertEqual(spec.loader, loader)
self.assertIs(reloaded, module)
self.assertEqual(loader.path, init_path)
self.assertEqual(ns, expected)
def test_reload_submodule(self):
# See #19851.
name = 'spam'
subname = 'ham'
with test_util.temp_module(name, pkg=True) as pkg_dir:
fullname, _ = test_util.submodule(name, subname, pkg_dir)
ham = self.init.import_module(fullname)
reloaded = self.init.reload(ham)
self.assertIs(reloaded, ham)
(Frozen_ReloadTests,
Source_ReloadTests
) = test_util.test_both(ReloadTests, init=init, util=util)
class InvalidateCacheTests:
def test_method_called(self):
# If defined the method should be called.
class InvalidatingNullFinder:
def __init__(self, *ignored):
self.called = False
def find_module(self, *args):
return None
def invalidate_caches(self):
self.called = True
key = 'gobledeegook'
meta_ins = InvalidatingNullFinder()
path_ins = InvalidatingNullFinder()
sys.meta_path.insert(0, meta_ins)
self.addCleanup(lambda: sys.path_importer_cache.__delitem__(key))
sys.path_importer_cache[key] = path_ins
self.addCleanup(lambda: sys.meta_path.remove(meta_ins))
self.init.invalidate_caches()
self.assertTrue(meta_ins.called)
self.assertTrue(path_ins.called)
def test_method_lacking(self):
# There should be no issues if the method is not defined.
key = 'gobbledeegook'
sys.path_importer_cache[key] = None
self.addCleanup(lambda: sys.path_importer_cache.__delitem__(key))
self.init.invalidate_caches() # Shouldn't trigger an exception.
(Frozen_InvalidateCacheTests,
Source_InvalidateCacheTests
) = test_util.test_both(InvalidateCacheTests, init=init)
class FrozenImportlibTests(unittest.TestCase):
def test_no_frozen_importlib(self):
# Should be able to import w/o _frozen_importlib being defined.
# Can't do an isinstance() check since separate copies of importlib
# may have been used for import, so just check the name is not for the
# frozen loader.
source_init = init['Source']
self.assertNotEqual(source_init.__loader__.__class__.__name__,
'FrozenImporter')
class StartupTests:
def test_everyone_has___loader__(self):
# Issue #17098: all modules should have __loader__ defined.
for name, module in sys.modules.items():
if isinstance(module, types.ModuleType):
with self.subTest(name=name):
self.assertTrue(hasattr(module, '__loader__'),
'{!r} lacks a __loader__ attribute'.format(name))
if self.machinery.BuiltinImporter.find_module(name):
self.assertIsNot(module.__loader__, None)
elif self.machinery.FrozenImporter.find_module(name):
self.assertIsNot(module.__loader__, None)
def test_everyone_has___spec__(self):
for name, module in sys.modules.items():
if isinstance(module, types.ModuleType):
with self.subTest(name=name):
self.assertTrue(hasattr(module, '__spec__'))
if self.machinery.BuiltinImporter.find_module(name):
self.assertIsNot(module.__spec__, None)
elif self.machinery.FrozenImporter.find_module(name):
self.assertIsNot(module.__spec__, None)
(Frozen_StartupTests,
Source_StartupTests
) = test_util.test_both(StartupTests, machinery=machinery)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
cloudant/graphite-web | webapp/graphite/render/attime.py | 8 | 5014 | """Copyright 2008 Orbitz WorldWide
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
from datetime import datetime,timedelta
from time import daylight
from django.conf import settings
try: # See if there is a system installation of pytz first
import pytz
except ImportError: # Otherwise we fall back to Graphite's bundled version
from graphite.thirdparty import pytz
months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
weekdays = ['sun','mon','tue','wed','thu','fri','sat']
tzinfo = pytz.timezone(settings.TIME_ZONE)
def parseATTime(s):
s = s.strip().lower().replace('_','').replace(',','').replace(' ','')
if s.isdigit():
if len(s) == 8 and int(s[:4]) > 1900 and int(s[4:6]) < 13 and int(s[6:]) < 32:
      pass # Fall back because it's not a timestamp, it's YYYYMMDD form
else:
return datetime.fromtimestamp(int(s),tzinfo)
if '+' in s:
ref,offset = s.split('+',1)
offset = '+' + offset
elif '-' in s:
ref,offset = s.split('-',1)
offset = '-' + offset
else:
ref,offset = s,''
return tzinfo.localize(parseTimeReference(ref), daylight) + parseTimeOffset(offset)
def parseTimeReference(ref):
if not ref or ref == 'now': return datetime.now()
#Time-of-day reference
i = ref.find(':')
hour,min = 0,0
if i != -1:
hour = int( ref[:i] )
min = int( ref[i+1:i+3] )
ref = ref[i+3:]
if ref[:2] == 'am': ref = ref[2:]
elif ref[:2] == 'pm':
hour = (hour + 12) % 24
ref = ref[2:]
if ref.startswith('noon'):
hour,min = 12,0
ref = ref[4:]
elif ref.startswith('midnight'):
hour,min = 0,0
ref = ref[8:]
elif ref.startswith('teatime'):
hour,min = 16,0
ref = ref[7:]
refDate = datetime.now().replace(hour=hour,minute=min,second=0)
#Day reference
if ref in ('yesterday','today','tomorrow'): #yesterday, today, tomorrow
if ref == 'yesterday':
refDate = refDate - timedelta(days=1)
if ref == 'tomorrow':
refDate = refDate + timedelta(days=1)
elif ref.count('/') == 2: #MM/DD/YY[YY]
m,d,y = map(int,ref.split('/'))
if y < 1900: y += 1900
if y < 1970: y += 100
refDate = refDate.replace(year=y)
try: # Fix for Bug #551771
refDate = refDate.replace(month=m)
refDate = refDate.replace(day=d)
except:
refDate = refDate.replace(day=d)
refDate = refDate.replace(month=m)
elif len(ref) == 8 and ref.isdigit(): #YYYYMMDD
refDate = refDate.replace(year= int(ref[:4]))
try: # Fix for Bug #551771
refDate = refDate.replace(month= int(ref[4:6]))
refDate = refDate.replace(day= int(ref[6:8]))
except:
refDate = refDate.replace(day= int(ref[6:8]))
refDate = refDate.replace(month= int(ref[4:6]))
elif ref[:3] in months: #MonthName DayOfMonth
refDate = refDate.replace(month= months.index(ref[:3]) + 1)
if ref[-2:].isdigit():
refDate = refDate.replace(day= int(ref[-2:]))
elif ref[-1:].isdigit():
refDate = refDate.replace(day= int(ref[-1:]))
else:
raise Exception, "Day of month required after month name"
elif ref[:3] in weekdays: #DayOfWeek (Monday, etc)
todayDayName = refDate.strftime("%a").lower()[:3]
today = weekdays.index( todayDayName )
twoWeeks = weekdays * 2
dayOffset = today - twoWeeks.index(ref[:3])
if dayOffset < 0: dayOffset += 7
refDate -= timedelta(days=dayOffset)
elif ref:
raise Exception, "Unknown day reference"
return refDate
def parseTimeOffset(offset):
if not offset:
return timedelta()
t = timedelta()
if offset[0].isdigit():
sign = 1
else:
sign = { '+' : 1, '-' : -1 }[offset[0]]
offset = offset[1:]
while offset:
i = 1
while offset[:i].isdigit() and i <= len(offset): i += 1
num = int(offset[:i-1])
offset = offset[i-1:]
i = 1
while offset[:i].isalpha() and i <= len(offset): i += 1
unit = offset[:i-1]
offset = offset[i-1:]
unitString = getUnitString(unit)
if unitString == 'months':
unitString = 'days'
num = num * 30
if unitString == 'years':
unitString = 'days'
num = num * 365
t += timedelta(**{ unitString : sign * num})
return t
def getUnitString(s):
if s.startswith('s'): return 'seconds'
if s.startswith('min'): return 'minutes'
if s.startswith('h'): return 'hours'
if s.startswith('d'): return 'days'
if s.startswith('w'): return 'weeks'
if s.startswith('mon'): return 'months'
if s.startswith('y'): return 'years'
raise Exception, "Invalid offset unit '%s'" % s
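# Hedged examples (illustrative, not part of the original module) of the
# "at-style" strings the parser above accepts:
#
#     parseATTime('now-1d')          # 24 hours ago
#     parseATTime('midnight+3h')     # 03:00 today, local time
#     parseATTime('noon12/25/2010')  # noon on an MM/DD/YYYY date
#     parseATTime('1262304000')      # a raw Unix timestamp
#
# Calendar offsets are approximations: parseTimeOffset() treats '1mon' as
# 30 days and '1y' as 365 days.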
| apache-2.0 |
levibostian/myBlanky | googleAppEngine/google/appengine/_internal/django/utils/formats.py | 23 | 5789 | import decimal
import datetime
from google.appengine._internal.django.conf import settings
from google.appengine._internal.django.utils.translation import get_language, to_locale, check_for_language
from google.appengine._internal.django.utils.importlib import import_module
from google.appengine._internal.django.utils.encoding import smart_str
from google.appengine._internal.django.utils import dateformat, numberformat, datetime_safe
from google.appengine._internal.django.utils.safestring import mark_safe
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly.
_format_cache = {}
_format_modules_cache = {}
def reset_format_cache():
"""Clear any cached formats.
This method is provided primarily for testing purposes,
so that the effects of cached formats can be removed.
"""
global _format_cache, _format_modules_cache
_format_cache = {}
_format_modules_cache = {}
def iter_format_modules(lang):
"""
Does the heavy lifting of finding format modules.
"""
if check_for_language(lang):
format_locations = ['django.conf.locale.%s']
if settings.FORMAT_MODULE_PATH:
format_locations.append(settings.FORMAT_MODULE_PATH + '.%s')
format_locations.reverse()
locale = to_locale(lang)
locales = [locale]
if '_' in locale:
locales.append(locale.split('_')[0])
for location in format_locations:
for loc in locales:
try:
yield import_module('.formats', location % loc)
except ImportError:
pass
def get_format_modules(reverse=False):
"""
Returns a list of the format modules found
"""
lang = get_language()
modules = _format_modules_cache.setdefault(lang, list(iter_format_modules(lang)))
if reverse:
return list(reversed(modules))
return modules
def get_format(format_type):
"""
For a specific format type, returns the format for the current
    language (locale), defaulting to the format in the settings.
format_type is the name of the format, e.g. 'DATE_FORMAT'
"""
format_type = smart_str(format_type)
if settings.USE_L10N:
cache_key = (format_type, get_language())
try:
return _format_cache[cache_key] or getattr(settings, format_type)
except KeyError:
for module in get_format_modules():
try:
val = getattr(module, format_type)
_format_cache[cache_key] = val
return val
except AttributeError:
pass
_format_cache[cache_key] = None
return getattr(settings, format_type)
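# Hedged example (illustrative, not part of the original module): with
# USE_L10N enabled, get_format('DATE_FORMAT') resolves the pattern from the
# active locale's format module and memoizes it per (format_type, language);
# with USE_L10N disabled it simply returns settings.DATE_FORMAT.
#
#     separator = get_format('DECIMAL_SEPARATOR')  # e.g. ',' for de-DE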
def date_format(value, format=None):
"""
Formats a datetime.date or datetime.datetime object using a
localizable format
"""
return dateformat.format(value, get_format(format or 'DATE_FORMAT'))
def time_format(value, format=None):
"""
Formats a datetime.time object using a localizable format
"""
return dateformat.time_format(value, get_format(format or 'TIME_FORMAT'))
def number_format(value, decimal_pos=None):
"""
Formats a numeric value using localization settings
"""
return numberformat.format(
value,
get_format('DECIMAL_SEPARATOR'),
decimal_pos,
get_format('NUMBER_GROUPING'),
get_format('THOUSAND_SEPARATOR'),
)
def localize(value):
"""
Checks if value is a localizable type (date, number...) and returns it
formatted as a string using current locale format
"""
if isinstance(value, bool):
return mark_safe(unicode(value))
elif isinstance(value, (decimal.Decimal, float, int, long)):
return number_format(value)
elif isinstance(value, datetime.datetime):
return date_format(value, 'DATETIME_FORMAT')
elif isinstance(value, datetime.date):
return date_format(value)
elif isinstance(value, datetime.time):
return time_format(value, 'TIME_FORMAT')
else:
return value
def localize_input(value, default=None):
"""
Checks if an input value is a localizable type and returns it
formatted with the appropriate formatting string of the current locale.
"""
if isinstance(value, (decimal.Decimal, float, int, long)):
return number_format(value)
if isinstance(value, datetime.datetime):
value = datetime_safe.new_datetime(value)
format = smart_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.date):
value = datetime_safe.new_date(value)
format = smart_str(default or get_format('DATE_INPUT_FORMATS')[0])
return value.strftime(format)
elif isinstance(value, datetime.time):
format = smart_str(default or get_format('TIME_INPUT_FORMATS')[0])
return value.strftime(format)
return value
def sanitize_separators(value):
"""
Sanitizes a value according to the current decimal and
thousand separator setting. Used with form field input.
"""
if settings.USE_L10N:
decimal_separator = get_format('DECIMAL_SEPARATOR')
if isinstance(value, basestring):
parts = []
if decimal_separator in value:
value, decimals = value.split(decimal_separator, 1)
parts.append(decimals)
if settings.USE_THOUSAND_SEPARATOR:
parts.append(value.replace(get_format('THOUSAND_SEPARATOR'), ''))
else:
parts.append(value)
value = '.'.join(reversed(parts))
return value
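# Hedged worked example (illustrative, not part of the original module): for
# a locale where DECIMAL_SEPARATOR is ',' and THOUSAND_SEPARATOR is '.', and
# with USE_L10N and USE_THOUSAND_SEPARATOR both enabled,
#
#     sanitize_separators(u'1.234,56')  # -> u'1234.56'
#
# i.e. form input is rewritten into a string that Decimal() or float() can
# parse directly.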
| mit |
mozvip/Sick-Beard | lib/requests/packages/chardet2/constants.py | 3008 | 1335 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
_debug = 0
eDetecting = 0
eFoundIt = 1
eNotMe = 2
eStart = 0
eError = 1
eItsMe = 2
SHORTCUT_THRESHOLD = 0.95
| gpl-3.0 |
a10networks/a10-neutron-lbaas | a10_neutron_lbaas/neutron_ext/services/a10_device_instance/plugin.py | 2 | 6477 | # Copyright 2015, A10 Networks
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import model_base
from oslo_log import log as logging
import a10_neutron_lbaas.a10_config as a10_config
from a10_neutron_lbaas.neutron_ext.common import constants
from a10_neutron_lbaas.neutron_ext.common import resources as common_resources
import a10_neutron_lbaas.neutron_ext.db.a10_device_instance as a10_device_instance
import a10_neutron_lbaas.vthunder.instance_manager as instance_manager
from a10_openstack_lib.resources import a10_device_instance as resources
LOG = logging.getLogger(__name__)
# api, vthunder, instance, and db keys
_API = 0
_VTHUNDER_CONFIG = 1
_INSTANCE = 2
_DB = 3
_mappings = [("id", None, None, "id"),
("tenant_id", None, None, "tenant_id"),
("tenant_id", None, None, "project_id"),
("project_id", None, None, "tenant_id"),
("project_id", None, None, "project_id"),
("name", None, None, "name"),
("description", None, None, "description"),
("host", None, "ip_address", "host"),
("username", "username", None, "username"),
("password", "password", None, "password"),
("api_version", "api_version", None, "api_version"),
("protocol", "protocol", None, "protocol"),
("port", "port", None, "port"),
("nova_instance_id", None, "nova_instance_id", "nova_instance_id"),
(None, "autosnat", None, "autosnat"),
(None, "v_method", None, "v_method"),
(None, "shared_partition", None, "shared_partition"),
(None, "use_float", None, "use_float"),
(None, "default_virtual_server_vrid", None, "default_virtual_server_vrid"),
(None, "ipinip", None, "ipinip"),
(None, "write_memory", None, "write_memory"),
("management_network", "vthunder_management_network", None, None),
("data_networks", "vthunder_data_networks", None, None),
("image", "glance_image", None, None),
("flavor", "nova_flavor", None, None)]
def _convert(source, from_type, to_type):
result = {}
for mapping in _mappings:
source_key = mapping[from_type]
if source_key is None or source_key not in source:
continue
dest_key = mapping[to_type]
if dest_key is None:
continue
result[dest_key] = source[source_key]
return result
def _make_api_dict(db_record):
return _convert(db_record, _DB, _API)
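# Hedged example (illustrative, not part of the original module): _convert()
# walks the _mappings table above and copies values from one representation
# into another, e.g.
#
#     _convert({'host': '10.0.0.5'}, _API, _DB)             # -> {'host': '10.0.0.5'}
#     _convert({'ip_address': '10.0.0.5'}, _INSTANCE, _DB)  # -> {'host': '10.0.0.5'}
#
# Keys with no counterpart in the target column are silently dropped.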
class A10DeviceInstancePlugin(a10_device_instance.A10DeviceInstanceDbMixin):
supported_extension_aliases = [constants.A10_DEVICE_INSTANCE_EXT]
def get_a10_device_instances(self, context, filters=None, fields=None):
LOG.debug(
"A10DeviceInstancePlugin.get_a10_instances(): filters=%s, fields=%s",
filters,
fields)
db_instances = super(A10DeviceInstancePlugin, self).get_a10_device_instances(
context, filters=filters, fields=fields)
return map(_make_api_dict, db_instances)
def create_a10_device_instance(self, context, a10_device_instance):
"""Attempt to create instance using neutron context"""
LOG.debug("A10DeviceInstancePlugin.create(): a10_device_instance=%s", a10_device_instance)
config = a10_config.A10Config()
vthunder_defaults = config.get_vthunder_config()
imgr = instance_manager.InstanceManager.from_config(config, context)
dev_instance = common_resources.remove_attributes_not_specified(
a10_device_instance.get(resources.RESOURCE))
# Create the instance with specified defaults.
vthunder_config = vthunder_defaults.copy()
vthunder_config.update(_convert(dev_instance, _API, _VTHUNDER_CONFIG))
instance = imgr.create_device_instance(vthunder_config, dev_instance.get("name"))
db_record = {}
db_record.update(_convert(vthunder_config, _VTHUNDER_CONFIG, _DB))
db_record.update(_convert(dev_instance, _API, _DB))
db_record.update(_convert(instance, _INSTANCE, _DB))
# If success, return the created DB record
# Else, raise an exception because that's what we would do anyway
db_instance = super(A10DeviceInstancePlugin, self).create_a10_device_instance(
context, {resources.RESOURCE: db_record})
return _make_api_dict(db_instance)
def get_a10_device_instance(self, context, id, fields=None):
LOG.debug("A10DeviceInstancePlugin.get_a10_instance(): id=%s, fields=%s",
id, fields)
db_instance = super(A10DeviceInstancePlugin, self).get_a10_device_instance(
context, id, fields=fields)
return _make_api_dict(db_instance)
def update_a10_device_instance(self, context, id, a10_device_instance):
LOG.debug(
"A10DeviceInstancePlugin.update_a10_device_instance(): id=%s, instance=%s",
id,
a10_device_instance)
db_instance = super(A10DeviceInstancePlugin, self).update_a10_device_instance(
context,
id,
a10_device_instance)
return _make_api_dict(db_instance)
def delete_a10_device_instance(self, context, id):
LOG.debug("A10DeviceInstancePlugin.delete(): id=%s", id)
# Deleting the actual instance requires knowing the nova instance ID
instance = super(A10DeviceInstancePlugin, self).get_a10_device_instance(context,
id)
nova_instance_id = instance.get("nova_instance_id")
config = a10_config.A10Config()
imgr = instance_manager.InstanceManager.from_config(config, context)
imgr.delete_instance(nova_instance_id)
return super(A10DeviceInstancePlugin, self).delete_a10_device_instance(context, id)
| apache-2.0 |
mattdangerw/inkscape | share/extensions/jessyInk_keyBindings.py | 7 | 15464 | #!/usr/bin/env python
# Copyright 2008, 2009 Hannes Hochreiner
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
# These lines are only needed if you don't put the script directly into
# the installation directory
import sys
# Unix
sys.path.append('/usr/share/inkscape/extensions')
# OS X
sys.path.append('/Applications/Inkscape.app/Contents/Resources/extensions')
# Windows
sys.path.append(r'C:\Program Files\Inkscape\share\extensions')
# We will use the inkex module with the predefined Effect base class.
import inkex
inkex.localize()
class JessyInk_CustomKeyBindings(inkex.Effect):
modes = ('slide', 'index', 'drawing')
keyCodes = ('LEFT', 'RIGHT', 'DOWN', 'UP', 'HOME', 'END', 'ENTER', 'SPACE', 'PAGE_UP', 'PAGE_DOWN', 'ESCAPE')
slideActions = {}
slideCharCodes = {}
slideKeyCodes = {}
drawingActions = {}
drawingCharCodes = {}
drawingKeyCodes = {}
indexActions = {}
indexCharCodes = {}
indexKeyCodes = {}
def __init__(self):
# Call the base class constructor.
inkex.Effect.__init__(self)
self.OptionParser.add_option('--tab', action = 'store', type = 'string', dest = 'what')
self.OptionParser.add_option('--slide_backWithEffects', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_nextWithEffects', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_backWithoutEffects', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_nextWithoutEffects', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_firstSlide', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_lastSlide', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_switchToIndexMode', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_switchToDrawingMode', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_setDuration', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_addSlide', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_toggleProgressBar', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_resetTimer', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--slide_export', action = 'callback', type = 'string', callback = self.slideOptions, default = '')
self.OptionParser.add_option('--drawing_switchToSlideMode', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidthDefault', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth1', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth2', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth3', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth4', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth5', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth6', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth7', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth8', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathWidth9', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourBlue', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourCyan', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourGreen', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourBlack', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourMagenta', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourOrange', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourRed', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourWhite', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_pathColourYellow', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--drawing_undo', action = 'callback', type = 'string', callback = self.drawingOptions, default = '')
self.OptionParser.add_option('--index_selectSlideToLeft', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_selectSlideToRight', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_selectSlideAbove', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_selectSlideBelow', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_previousPage', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_nextPage', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_firstSlide', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_lastSlide', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_switchToSlideMode', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_decreaseNumberOfColumns', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_increaseNumberOfColumns', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
self.OptionParser.add_option('--index_setNumberOfColumnsToDefault', action = 'callback', type = 'string', callback = self.indexOptions, default = '')
inkex.NSS[u"jessyink"] = u"https://launchpad.net/jessyink"
self.slideActions["backWithEffects"] = "dispatchEffects(-1);"
self.slideActions["nextWithEffects"] = "dispatchEffects(1);"
self.slideActions["backWithoutEffects"] = "skipEffects(-1);"
self.slideActions["nextWithoutEffects"] = "skipEffects(1);"
self.slideActions["firstSlide"] = "slideSetActiveSlide(0);"
self.slideActions["lastSlide"] = "slideSetActiveSlide(slides.length - 1);"
self.slideActions["switchToIndexMode"] = "toggleSlideIndex();"
self.slideActions["switchToDrawingMode"] = "slideSwitchToDrawingMode();"
self.slideActions["setDuration"] = "slideQueryDuration();"
self.slideActions["addSlide"] = "slideAddSlide(activeSlide);"
self.slideActions["toggleProgressBar"] = "slideToggleProgressBarVisibility();"
self.slideActions["resetTimer"] = "slideResetTimer();"
self.slideActions["export"] = "slideUpdateExportLayer();"
self.drawingActions["switchToSlideMode"] = "drawingSwitchToSlideMode();"
self.drawingActions["pathWidthDefault"] = "drawingResetPathWidth();"
		self.drawingActions["pathWidth1"] = "drawingSetPathWidth(1.0);"
		self.drawingActions["pathWidth2"] = "drawingSetPathWidth(2.0);"
		self.drawingActions["pathWidth3"] = "drawingSetPathWidth(3.0);"
		self.drawingActions["pathWidth4"] = "drawingSetPathWidth(4.0);"
		self.drawingActions["pathWidth5"] = "drawingSetPathWidth(5.0);"
		self.drawingActions["pathWidth6"] = "drawingSetPathWidth(6.0);"
		self.drawingActions["pathWidth7"] = "drawingSetPathWidth(7.0);"
		self.drawingActions["pathWidth8"] = "drawingSetPathWidth(8.0);"
		self.drawingActions["pathWidth9"] = "drawingSetPathWidth(9.0);"
self.drawingActions["pathColourBlue"] = "drawingSetPathColour(\"blue\");"
self.drawingActions["pathColourCyan"] = "drawingSetPathColour(\"cyan\");"
self.drawingActions["pathColourGreen"] = "drawingSetPathColour(\"green\");"
self.drawingActions["pathColourBlack"] = "drawingSetPathColour(\"black\");"
self.drawingActions["pathColourMagenta"] = "drawingSetPathColour(\"magenta\");"
self.drawingActions["pathColourOrange"] = "drawingSetPathColour(\"orange\");"
self.drawingActions["pathColourRed"] = "drawingSetPathColour(\"red\");"
self.drawingActions["pathColourWhite"] = "drawingSetPathColour(\"white\");"
self.drawingActions["pathColourYellow"] = "drawingSetPathColour(\"yellow\");"
self.drawingActions["undo"] = "drawingUndo();"
self.indexActions["selectSlideToLeft"] = "indexSetPageSlide(activeSlide - 1);"
self.indexActions["selectSlideToRight"] = "indexSetPageSlide(activeSlide + 1);"
self.indexActions["selectSlideAbove"] = "indexSetPageSlide(activeSlide - INDEX_COLUMNS);"
self.indexActions["selectSlideBelow"] = "indexSetPageSlide(activeSlide + INDEX_COLUMNS);"
self.indexActions["previousPage"] = "indexSetPageSlide(activeSlide - INDEX_COLUMNS * INDEX_COLUMNS);"
self.indexActions["nextPage"] = "indexSetPageSlide(activeSlide + INDEX_COLUMNS * INDEX_COLUMNS);"
self.indexActions["firstSlide"] = "indexSetPageSlide(0);"
self.indexActions["lastSlide"] = "indexSetPageSlide(slides.length - 1);"
self.indexActions["switchToSlideMode"] = "toggleSlideIndex();"
self.indexActions["decreaseNumberOfColumns"] = "indexDecreaseNumberOfColumns();"
self.indexActions["increaseNumberOfColumns"] = "indexIncreaseNumberOfColumns();"
self.indexActions["setNumberOfColumnsToDefault"] = "indexResetNumberOfColumns();"
def slideOptions(self, option, opt_str, value, parser):
action = self.getAction(opt_str)
valueArray = value.split(",")
for val in valueArray:
val = val.strip()
if val in self.keyCodes:
self.slideKeyCodes[val + "_KEY"] = self.slideActions[action]
elif len(val) == 1:
self.slideCharCodes[val] = self.slideActions[action]
def drawingOptions(self, option, opt_str, value, parser):
action = self.getAction(opt_str)
valueArray = value.split(",")
for val in valueArray:
val = val.strip()
if val in self.keyCodes:
self.drawingKeyCodes[val + "_KEY"] = self.drawingActions[action]
elif len(val) == 1:
self.drawingCharCodes[val] = self.drawingActions[action]
def indexOptions(self, option, opt_str, value, parser):
action = self.getAction(opt_str)
valueArray = value.split(",")
for val in valueArray:
val = val.strip()
if val in self.keyCodes:
self.indexKeyCodes[val + "_KEY"] = self.indexActions[action]
elif len(val) == 1:
self.indexCharCodes[val] = self.indexActions[action]
def effect(self):
# Check version.
scriptNodes = self.document.xpath("//svg:script[@jessyink:version='1.5.5']", namespaces=inkex.NSS)
if len(scriptNodes) != 1:
inkex.errormsg(_("The JessyInk script is not installed in this SVG file or has a different version than the JessyInk extensions. Please select \"install/update...\" from the \"JessyInk\" sub-menu of the \"Extensions\" menu to install or update the JessyInk script.\n\n"))
		# Remove any previously stored custom key bindings group.
for node in self.document.xpath("//svg:g[@jessyink:customKeyBindings='customKeyBindings']", namespaces=inkex.NSS):
node.getparent().remove(node)
# Set custom key bindings.
nodeText = "function getCustomKeyBindingsSub()" + "\n"
nodeText += "{" + "\n"
nodeText += " var keyDict = new Object();" + "\n"
nodeText += " keyDict[SLIDE_MODE] = new Object();" + "\n"
nodeText += " keyDict[INDEX_MODE] = new Object();" + "\n"
nodeText += " keyDict[DRAWING_MODE] = new Object();" + "\n"
for key, value in self.slideKeyCodes.items():
nodeText += " keyDict[SLIDE_MODE][" + key + "] = function() { " + value + " };" + "\n"
for key, value in self.drawingKeyCodes.items():
nodeText += " keyDict[DRAWING_MODE][" + key + "] = function() { " + value + " };" + "\n"
for key, value in self.indexKeyCodes.items():
nodeText += " keyDict[INDEX_MODE][" + key + "] = function() { " + value + " };" + "\n"
nodeText += " return keyDict;" + "\n"
nodeText += "}" + "\n\n"
# Set custom char bindings.
nodeText += "function getCustomCharBindingsSub()" + "\n"
nodeText += "{" + "\n"
nodeText += " var charDict = new Object();" + "\n"
nodeText += " charDict[SLIDE_MODE] = new Object();" + "\n"
nodeText += " charDict[INDEX_MODE] = new Object();" + "\n"
nodeText += " charDict[DRAWING_MODE] = new Object();" + "\n"
for key, value in self.slideCharCodes.items():
nodeText += " charDict[SLIDE_MODE][\"" + key + "\"] = function() { " + value + " };" + "\n"
for key, value in self.drawingCharCodes.items():
nodeText += " charDict[DRAWING_MODE][\"" + key + "\"] = function() { " + value + " };" + "\n"
for key, value in self.indexCharCodes.items():
nodeText += " charDict[INDEX_MODE][\"" + key + "\"] = function() { " + value + " };" + "\n"
nodeText += " return charDict;" + "\n"
nodeText += "}" + "\n"
# Create new script node
scriptElm = inkex.etree.Element(inkex.addNS("script", "svg"))
scriptElm.text = nodeText
groupElm = inkex.etree.Element(inkex.addNS("g", "svg"))
groupElm.set("{" + inkex.NSS["jessyink"] + "}customKeyBindings", "customKeyBindings")
groupElm.set("onload", "this.getCustomCharBindings = function() { return getCustomCharBindingsSub(); }; this.getCustomKeyBindings = function() { return getCustomKeyBindingsSub(); };")
groupElm.append(scriptElm)
self.document.getroot().append(groupElm)
def getAction(self, varName):
parts = varName.split('_')
if (len(parts) != 2):
			raise Exception("Error parsing variable name.")
return parts[1]
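# Hedged example (illustrative, not part of the original module): getAction()
# strips the mode prefix from an option string, e.g.
#
#     '--slide_nextWithEffects'.split('_')  # -> ['--slide', 'nextWithEffects']
#
# so getAction('--slide_nextWithEffects') returns 'nextWithEffects', which is
# then used to index the per-mode action dictionaries built in __init__.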
# Create effect instance
effect = JessyInk_CustomKeyBindings()
effect.affect()
| gpl-2.0 |
RobinD42/pyside | tests/signals/ref06_test.py | 6 | 1133 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from PySide.QtCore import QObject, QCoreApplication, QTimeLine, Signal, Slot
from helper import UsesQCoreApplication
class ExtQObject(QObject):
signalbetween = Signal('qreal')
def __init__(self):
QObject.__init__(self)
self.counter = 0
@Slot('qreal')
def foo(self, value):
self.counter += 1
class SignaltoSignalTest(UsesQCoreApplication):
def setUp(self):
UsesQCoreApplication.setUp(self)
self.receiver = ExtQObject()
self.timeline = QTimeLine(100)
def tearDown(self):
del self.timeline
del self.receiver
UsesQCoreApplication.tearDown(self)
def testSignaltoSignal(self):
self.timeline.setUpdateInterval(10)
self.timeline.finished.connect(self.app.quit)
self.timeline.valueChanged.connect(self.receiver.signalbetween)
self.receiver.signalbetween.connect(self.receiver.foo)
self.timeline.start()
self.app.exec_()
self.assert_(self.receiver.counter > 1)
if __name__ == '__main__':
unittest.main()
| lgpl-2.1 |
atmtools/typhon | doc/example_google.py | 1 | 8646 | # -*- coding: utf-8 -*-
"""Example Google style docstrings.
This module demonstrates documentation as specified by the `Google Python
Style Guide`_. Docstrings may extend over multiple lines. Sections are created
with a section header and a colon followed by a block of indented text.
Example:
Examples can be given using either the ``Example`` or ``Examples``
sections. Sections support any reStructuredText formatting, including
literal blocks::
$ python example_google.py
Section breaks are created by resuming unindented text. Section breaks
are also implicitly created anytime a new section starts.
Attributes:
module_level_variable1 (int): Module level variables may be documented in
either the ``Attributes`` section of the module docstring, or in an
inline docstring immediately following the variable.
Either form is acceptable, but the two should not be mixed. Choose
one convention to document module level variables and be consistent
with it.
.. _Google Python Style Guide:
http://google.github.io/styleguide/pyguide.html
"""
module_level_variable1 = 12345
module_level_variable2 = 98765
"""int: Module level variable documented inline.
The docstring may span multiple lines. The type may optionally be specified
on the first line, separated by a colon.
"""
def module_level_function(param1, param2=None, *args, **kwargs):
"""This is an example of a module level function.
Function parameters should be documented in the ``Args`` section. The name
of each parameter is required. The type and description of each parameter
is optional, but should be included if not obvious.
Parameter types -- if given -- should be specified according to
`PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.
If \*args or \*\*kwargs are accepted,
they should be listed as ``*args`` and ``**kwargs``.
The format for a parameter is::
name (type): description
The description may span multiple lines. Following
lines should be indented. The "(type)" is optional.
Multiple paragraphs are supported in parameter
descriptions.
Args:
param1 (int): The first parameter.
param2 (Optional[str]): The second parameter. Defaults to None.
Second line of description should be indented.
*args: Variable length argument list.
**kwargs: Arbitrary keyword arguments.
Returns:
bool: True if successful, False otherwise.
The return type is optional and may be specified at the beginning of
the ``Returns`` section followed by a colon.
The ``Returns`` section may span multiple lines and paragraphs.
Following lines should be indented to match the first line.
The ``Returns`` section supports any reStructuredText formatting,
including literal blocks::
{
'param1': param1,
'param2': param2
}
Raises:
AttributeError: The ``Raises`` section is a list of all exceptions
that are relevant to the interface.
ValueError: If `param2` is equal to `param1`.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
if param1 == param2:
raise ValueError('param1 may not be equal to param2')
return True
def example_generator(n):
"""Generators have a ``Yields`` section instead of a ``Returns`` section.
Args:
n (int): The upper limit of the range to generate, from 0 to `n` - 1.
Yields:
int: The next number in the range of 0 to `n` - 1.
Examples:
Examples should be written in doctest format, and should illustrate how
to use the function.
>>> print([i for i in example_generator(4)])
[0, 1, 2, 3]
"""
for i in range(n):
yield i
class ExampleError(Exception):
"""Exceptions are documented in the same way as classes.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
msg (str): Human readable string describing the exception.
code (Optional[int]): Error code.
Attributes:
msg (str): Human readable string describing the exception.
code (int): Exception error code.
"""
def __init__(self, msg, code):
self.msg = msg
self.code = code
class ExampleClass(object):
"""The summary line for a class docstring should fit on one line.
If the class has public attributes, they may be documented here
in an ``Attributes`` section and follow the same formatting as a
function's ``Args`` section. Alternatively, attributes may be documented
inline with the attribute's declaration (see __init__ method below).
Properties created with the ``@property`` decorator should be documented
in the property's getter method.
Attribute and property types -- if given -- should be specified according
to `PEP 484`_, though `PEP 484`_ conformance isn't required or enforced.
Attributes:
attr1 (str): Description of `attr1`.
attr2 (Optional[int]): Description of `attr2`.
.. _PEP 484:
https://www.python.org/dev/peps/pep-0484/
"""
def __init__(self, param1, param2, param3):
"""Example of docstring on the __init__ method.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1 (str): Description of `param1`.
param2 (Optional[int]): Description of `param2`. Multiple
lines are supported.
param3 (List[str]): Description of `param3`.
"""
self.attr1 = param1
self.attr2 = param2
self.attr3 = param3 #: Doc comment *inline* with attribute
#: List[str]: Doc comment *before* attribute, with type specified
self.attr4 = ['attr4']
self.attr5 = None
"""Optional[str]: Docstring *after* attribute, with type specified."""
@property
def readonly_property(self):
"""str: Properties should be documented in their getter method."""
return 'readonly_property'
@property
def readwrite_property(self):
"""List[str]: Properties with both a getter and setter should only
be documented in their getter method.
If the setter method contains notable behavior, it should be
mentioned here.
"""
return ['readwrite_property']
@readwrite_property.setter
def readwrite_property(self, value):
value
def example_method(self, param1, param2):
"""Class methods are similar to regular functions.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
param1: The first parameter.
param2: The second parameter.
Returns:
True if successful, False otherwise.
"""
return True
def __special__(self):
"""By default special members with docstrings are included.
Special members are any methods or attributes that start with and
end with a double underscore. Any special member with a docstring
will be included in the output.
This behavior can be disabled by changing the following setting in
Sphinx's conf.py::
napoleon_include_special_with_doc = False
"""
pass
def __special_without_docstring__(self):
pass
def _private(self):
"""By default private members are not included.
Private members are any methods or attributes that start with an
underscore and are *not* special. By default they are not included
in the output.
This behavior can be changed such that private members *are* included
by changing the following setting in Sphinx's conf.py::
napoleon_include_private_with_doc = True
"""
pass
def _private_without_docstring(self):
pass
| mit |
dwaynebailey/translate | translate/storage/tmdb.py | 3 | 13483 | # -*- coding: utf-8 -*-
#
# Copyright 2009, 2013 Zuza Software Foundation
# Copyright 2013 F Wolff
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Module to provide a translation memory database."""
import logging
import math
import re
import six
import threading
import time
from sqlite3 import dbapi2
from translate.lang import data
from translate.search.lshtein import LevenshteinComparer
STRIP_REGEXP = re.compile(r"\W", re.UNICODE)
class LanguageError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return str(self.value)
class TMDB(object):
_tm_dbs = {}
def __init__(self, db_file, max_candidates=3, min_similarity=75,
max_length=1000):
self.max_candidates = max_candidates
self.min_similarity = min_similarity
self.max_length = max_length
if not isinstance(db_file, six.text_type):
db_file = six.text_type(db_file) # don't know which encoding
self.db_file = db_file
# share connections to same database file between different instances
if db_file not in self._tm_dbs:
self._tm_dbs[db_file] = {}
self._tm_db = self._tm_dbs[db_file]
# FIXME: do we want to do any checks before we initialize the DB?
self.init_database()
self.fulltext = False
self.init_fulltext()
self.comparer = LevenshteinComparer(self.max_length)
self.preload_db()
def _get_connection(self, index):
current_thread = threading.currentThread()
if current_thread not in self._tm_db:
connection = dbapi2.connect(self.db_file.encode('utf-8') if six.PY2 else self.db_file)
cursor = connection.cursor()
self._tm_db[current_thread] = (connection, cursor)
return self._tm_db[current_thread][index]
connection = property(lambda self: self._get_connection(0))
cursor = property(lambda self: self._get_connection(1))
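    # Hedged note (illustrative, not part of the original module): the two
    # properties above hand out one sqlite3 connection/cursor pair per
    # thread, keyed on threading.currentThread(), because sqlite3 objects
    # created in one thread must not be used from another.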
def init_database(self):
"""creates database tables and indices"""
script = """
CREATE TABLE IF NOT EXISTS sources (
sid INTEGER PRIMARY KEY AUTOINCREMENT,
text VARCHAR NOT NULL,
context VARCHAR DEFAULT NULL,
lang VARCHAR NOT NULL,
length INTEGER NOT NULL
);
CREATE INDEX IF NOT EXISTS sources_context_idx ON sources (context);
CREATE INDEX IF NOT EXISTS sources_lang_idx ON sources (lang);
CREATE INDEX IF NOT EXISTS sources_length_idx ON sources (length);
CREATE UNIQUE INDEX IF NOT EXISTS sources_uniq_idx ON sources (text, context, lang);
CREATE TABLE IF NOT EXISTS targets (
tid INTEGER PRIMARY KEY AUTOINCREMENT,
sid INTEGER NOT NULL,
text VARCHAR NOT NULL,
lang VARCHAR NOT NULL,
time INTEGER DEFAULT NULL,
FOREIGN KEY (sid) references sources(sid)
);
CREATE INDEX IF NOT EXISTS targets_sid_idx ON targets (sid);
CREATE INDEX IF NOT EXISTS targets_lang_idx ON targets (lang);
CREATE INDEX IF NOT EXISTS targets_time_idx ON targets (time);
CREATE UNIQUE INDEX IF NOT EXISTS targets_uniq_idx ON targets (sid, text, lang);
"""
try:
self.cursor.executescript(script)
self.connection.commit()
except Exception:
self.connection.rollback()
raise
def init_fulltext(self):
"""detects if fts3 fulltext indexing module exists, initializes fulltext table if it does"""
# HACKISH: no better way to detect fts3 support except trying to
# construct a dummy table?!
try:
script = """
DROP TABLE IF EXISTS test_for_fts3;
CREATE VIRTUAL TABLE test_for_fts3 USING fts3;
DROP TABLE test_for_fts3;
"""
self.cursor.executescript(script)
logging.debug("fts3 supported")
# for some reason CREATE VIRTUAL TABLE doesn't support IF NOT
# EXISTS syntax check if fulltext index table exists manually
self.cursor.execute("SELECT name FROM sqlite_master WHERE name = 'fulltext'")
if not self.cursor.fetchone():
# create fulltext index table, and index all strings in sources
script = """
CREATE VIRTUAL TABLE fulltext USING fts3(text);
"""
                logging.debug("fulltext table does not exist, creating it")
self.cursor.executescript(script)
logging.debug("created fulltext table")
else:
logging.debug("fulltext table already exists")
# create triggers that would sync sources table with fulltext index
script = """
INSERT INTO fulltext (rowid, text) SELECT sid, text FROM sources WHERE sid NOT IN (SELECT rowid FROM fulltext);
CREATE TRIGGER IF NOT EXISTS sources_insert_trig AFTER INSERT ON sources FOR EACH ROW
BEGIN
INSERT INTO fulltext (docid, text) VALUES (NEW.sid, NEW.text);
END;
CREATE TRIGGER IF NOT EXISTS sources_update_trig AFTER UPDATE OF text ON sources FOR EACH ROW
BEGIN
UPDATE fulltext SET text = NEW.text WHERE docid = NEW.sid;
END;
CREATE TRIGGER IF NOT EXISTS sources_delete_trig AFTER DELETE ON sources FOR EACH ROW
BEGIN
DELETE FROM fulltext WHERE docid = OLD.sid;
END;
"""
self.cursor.executescript(script)
self.connection.commit()
logging.debug("created fulltext triggers")
self.fulltext = True
except dbapi2.OperationalError as e:
self.fulltext = False
logging.debug("failed to initialize fts3 support: " + str(e))
script = """
DROP TRIGGER IF EXISTS sources_insert_trig;
DROP TRIGGER IF EXISTS sources_update_trig;
DROP TRIGGER IF EXISTS sources_delete_trig;
"""
self.cursor.executescript(script)
def preload_db(self):
"""ugly hack to force caching of sqlite db file in memory for improved
performance
"""
if self.fulltext:
query = """SELECT COUNT(*) FROM sources s JOIN fulltext f ON s.sid = f.docid JOIN targets t on s.sid = t.sid"""
else:
query = """SELECT COUNT(*) FROM sources s JOIN targets t on s.sid = t.sid"""
self.cursor.execute(query)
(numrows,) = self.cursor.fetchone()
logging.debug("tmdb has %d records" % numrows)
return numrows
def add_unit(self, unit, source_lang=None, target_lang=None, commit=True):
"""inserts unit in the database"""
# TODO: is this really the best way to handle unspecified source
# and target languages? And how should conflicts between unit
# attributes and the passed arguments be resolved?
if unit.getsourcelanguage():
source_lang = unit.getsourcelanguage()
if unit.gettargetlanguage():
target_lang = unit.gettargetlanguage()
if not source_lang:
raise LanguageError("undefined source language")
if not target_lang:
raise LanguageError("undefined target language")
unitdict = {
"source": unit.source,
"target": unit.target,
"context": unit.getcontext(),
}
self.add_dict(unitdict, source_lang, target_lang, commit)
def add_dict(self, unit, source_lang, target_lang, commit=True):
"""inserts units represented as dictionaries in database"""
source_lang = data.normalize_code(source_lang)
target_lang = data.normalize_code(target_lang)
try:
try:
self.cursor.execute("INSERT INTO sources (text, context, lang, length) VALUES(?, ?, ?, ?)",
(unit["source"],
unit["context"],
source_lang,
len(unit["source"])))
sid = self.cursor.lastrowid
except dbapi2.IntegrityError:
# source string already exists in db, run query to find sid
self.cursor.execute("SELECT sid FROM sources WHERE text=? AND context=? and lang=?",
(unit["source"],
unit["context"],
source_lang))
sid = self.cursor.fetchone()
(sid,) = sid
try:
# FIXME: get time info from translation store
# FIXME: do we need so store target length?
self.cursor.execute("INSERT INTO targets (sid, text, lang, time) VALUES (?, ?, ?, ?)",
(sid,
unit["target"],
target_lang,
int(time.time())))
except dbapi2.IntegrityError:
# target string already exists in db, do nothing
pass
if commit:
self.connection.commit()
except Exception:
if commit:
self.connection.rollback()
raise
def add_store(self, store, source_lang, target_lang, commit=True):
"""insert all units in store in database"""
count = 0
for unit in store.units:
if unit.istranslatable() and unit.istranslated():
self.add_unit(unit, source_lang, target_lang, commit=False)
count += 1
if commit:
self.connection.commit()
return count
def add_list(self, units, source_lang, target_lang, commit=True):
"""insert all units in list into the database, units are represented as
dictionaries
"""
count = 0
for unit in units:
self.add_dict(unit, source_lang, target_lang, commit=False)
count += 1
if commit:
self.connection.commit()
return count
def translate_unit(self, unit_source, source_langs, target_langs):
"""return TM suggestions for unit_source"""
if isinstance(unit_source, bytes):
unit_source = unit_source.decode("utf-8")
# Normalize language codes, keeping them as lists so that one SQL
# placeholder can be generated per language. (Binding a comma-joined
# string to a single "IN (?)" placeholder would compare each row's
# lang column against the whole joined string and never match.)
if not isinstance(source_langs, list): source_langs = [source_langs]
source_langs = [data.normalize_code(lang) for lang in source_langs]
if not isinstance(target_langs, list): target_langs = [target_langs]
target_langs = [data.normalize_code(lang) for lang in target_langs]
minlen = min_levenshtein_length(len(unit_source), self.min_similarity)
maxlen = max_levenshtein_length(len(unit_source), self.min_similarity,
self.max_length)
# split source into words, remove punctuation and special
# chars, keep words that are at least 3 chars long
unit_words = STRIP_REGEXP.sub(' ', unit_source).split()
unit_words = list(filter(lambda word: len(word) > 2, unit_words))
if self.fulltext and len(unit_words) > 3:
logging.debug("fulltext matching")
query = """SELECT s.text, t.text, s.context, s.lang, t.lang FROM sources s JOIN targets t ON s.sid = t.sid JOIN fulltext f ON s.sid = f.docid
WHERE s.lang IN (?) AND t.lang IN (?) AND s.length BETWEEN ? AND ?
AND fulltext MATCH ?"""
search_str = " OR ".join(unit_words)
self.cursor.execute(query, (source_langs, target_langs, minlen,
maxlen, search_str))
else:
logging.debug("nonfulltext matching")
query = """SELECT s.text, t.text, s.context, s.lang, t.lang FROM sources s JOIN targets t ON s.sid = t.sid
WHERE s.lang IN (?) AND t.lang IN (?)
AND s.length >= ? AND s.length <= ?"""
self.cursor.execute(query, (source_langs, target_langs, minlen,
maxlen))
results = []
for row in self.cursor:
quality = self.comparer.similarity(unit_source, row[0],
self.min_similarity)
if quality >= self.min_similarity:
results.append({
'source': row[0],
'target': row[1],
'context': row[2],
'quality': quality,
})
results.sort(key=lambda match: match['quality'], reverse=True)
results = results[:self.max_candidates]
logging.debug("results: %s", six.text_type(results))
return results
def min_levenshtein_length(length, min_similarity):
return math.ceil(max(length * (min_similarity / 100.0), 2))
def max_levenshtein_length(length, min_similarity, max_length):
return math.floor(min(length / (min_similarity / 100.0), max_length))
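# --- Illustrative usage sketch (not part of the original module) ---
# The database path, language codes, and strings below are hypothetical;
# the unit dict mirrors the keys add_dict() expects. Note how the length
# bounds above prune candidates: min_levenshtein_length(9, 75) == 7 and
# max_levenshtein_length(9, 75, 1000) == 12.
if __name__ == '__main__':
    demo = TMDB("/tmp/demo_tm.db", max_candidates=3, min_similarity=75)
    demo.add_dict({"source": "Open file", "target": "Ouvrir le fichier",
                   "context": ""}, "en", "fr")
    # Each suggestion is a dict with "source", "target", "context", "quality".
    for match in demo.translate_unit("Open file", "en", "fr"):
        print(match)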
| gpl-2.0 |
kirca/OpenUpgrade | doc/_themes/odoodoc/__init__.py | 90 | 1063 | # -*- coding: utf-8 -*-
from . import html_domain
from . import github
# add Odoo style to pygments
from . import odoo_pygments
from . import sphinx_monkeypatch
sphinx_monkeypatch.patch()
def setup(app):
html_domain.setup(app)
github.setup(app)
app.add_directive('exercise', Exercise)
app.add_node(exercise, html=(
lambda self, node: self.visit_admonition(node, 'exercise'),
lambda self, node: self.depart_admonition(node)
), latex=(
lambda self, node: self.visit_admonition(node),
lambda self, node: self.depart_admonition(node)
))
from docutils import nodes
from docutils.parsers.rst.directives import admonitions
class exercise(nodes.Admonition, nodes.Element): pass
class Exercise(admonitions.BaseAdmonition):
node_class = exercise
from sphinx.locale import admonitionlabels, l_
admonitionlabels['exercise'] = l_('Exercise')
# monkeypatch PHP lexer to not require <?php
from sphinx.highlighting import lexers
from pygments.lexers.web import PhpLexer
lexers['php'] = PhpLexer(startinline=True)
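# Illustrative reStructuredText usage of the directive registered above
# (the document content is hypothetical, not part of this module):
#
#   .. exercise::
#
#      Rewrite the view so that it filters on the current user.
#
# Sphinx renders this as an admonition labelled "Exercise", per the
# admonitionlabels entry set above.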
| agpl-3.0 |
GinnyN/towerofdimensions-django | django-social-auth/social_auth/middleware.py | 3 | 1892 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import messages
from django.shortcuts import redirect
from social_auth.backends.exceptions import AuthException
class SocialAuthExceptionMiddleware(object):
"""Middleware that handles Social Auth AuthExceptions by providing the user
with a message, logging an error, and redirecting to some next location.
By default, the exception message itself is sent to the user and they are
redirected to the location specified in the LOGIN_ERROR_URL setting.
This middleware can be extended by overriding the get_message or
get_redirect_uri methods, which each accept request and exception.
"""
def process_exception(self, request, exception):
if isinstance(exception, AuthException):
if hasattr(exception.backend, 'AUTH_BACKEND'):
backend_name = exception.backend.AUTH_BACKEND.name
else:
backend_name = exception.backend.name
message = self.get_message(request, exception)
url = self.get_redirect_uri(request, exception)
if request.user.is_authenticated():
# The messages framework needs an authenticated session here;
# for anonymous users, pass the details via the query string below
messages.error(
request,
message,
extra_tags=u'social-auth {0}'.format(backend_name)
)
else:
url = url + ('?' in url and '&' or '?') \
+ 'message={0}&backend={1}'.format(message,
backend_name)
return redirect(url)
def get_message(self, request, exception):
return unicode(exception)
def get_redirect_uri(self, request, exception):
return settings.LOGIN_ERROR_URL
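# A minimal sketch of the extension point described in the class docstring;
# the subclass name, message wording, and redirect target are hypothetical.
class FriendlySocialAuthExceptionMiddleware(SocialAuthExceptionMiddleware):
    def get_message(self, request, exception):
        # Hide backend-specific details from end users.
        return u'Sorry, we could not log you in. Please try again.'

    def get_redirect_uri(self, request, exception):
        # Send errors back to the login page instead of LOGIN_ERROR_URL.
        return '/login/'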
| bsd-3-clause |
Endika/edx-platform | common/djangoapps/auth_exchange/forms.py | 113 | 3848 | """
Forms to support third-party to first-party OAuth 2.0 access token exchange
"""
from django.contrib.auth.models import User
from django.forms import CharField
from oauth2_provider.constants import SCOPE_NAMES
import provider.constants
from provider.forms import OAuthForm, OAuthValidationError
from provider.oauth2.forms import ScopeChoiceField, ScopeMixin
from provider.oauth2.models import Client
from requests import HTTPError
from social.backends import oauth as social_oauth
from social.exceptions import AuthException
from third_party_auth import pipeline
class AccessTokenExchangeForm(ScopeMixin, OAuthForm):
"""Form for access token exchange endpoint"""
access_token = CharField(required=False)
scope = ScopeChoiceField(choices=SCOPE_NAMES, required=False)
client_id = CharField(required=False)
def __init__(self, request, *args, **kwargs):
super(AccessTokenExchangeForm, self).__init__(*args, **kwargs)
self.request = request
def _require_oauth_field(self, field_name):
"""
Raise an appropriate OAuthValidationError error if the field is missing
"""
field_val = self.cleaned_data.get(field_name)
if not field_val:
raise OAuthValidationError(
{
"error": "invalid_request",
"error_description": "{} is required".format(field_name),
}
)
return field_val
def clean_access_token(self):
"""
Validates and returns the "access_token" field.
"""
return self._require_oauth_field("access_token")
def clean_client_id(self):
"""
Validates and returns the "client_id" field.
"""
return self._require_oauth_field("client_id")
def clean(self):
if self._errors:
return {}
backend = self.request.backend
if not isinstance(backend, social_oauth.BaseOAuth2):
raise OAuthValidationError(
{
"error": "invalid_request",
"error_description": "{} is not a supported provider".format(backend.name),
}
)
self.request.session[pipeline.AUTH_ENTRY_KEY] = pipeline.AUTH_ENTRY_LOGIN_API
client_id = self.cleaned_data["client_id"]
try:
client = Client.objects.get(client_id=client_id)
except Client.DoesNotExist:
raise OAuthValidationError(
{
"error": "invalid_client",
"error_description": "{} is not a valid client_id".format(client_id),
}
)
if client.client_type != provider.constants.PUBLIC:
raise OAuthValidationError(
{
# invalid_client isn't really the right code, but this mirrors
# https://github.com/edx/django-oauth2-provider/blob/edx/provider/oauth2/forms.py#L331
"error": "invalid_client",
"error_description": "{} is not a public client".format(client_id),
}
)
self.cleaned_data["client"] = client
user = None
try:
user = backend.do_auth(self.cleaned_data.get("access_token"), allow_inactive_user=True)
except (HTTPError, AuthException):
pass
if user and isinstance(user, User):
self.cleaned_data["user"] = user
else:
# Ensure user does not re-enter the pipeline
self.request.social_strategy.clean_partial_pipeline()
raise OAuthValidationError(
{
"error": "invalid_grant",
"error_description": "access_token is not valid",
}
)
return self.cleaned_data
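# Illustrative driver for the form above (request construction and backend
# wiring are omitted, and the token/client values are hypothetical):
#
#     form = AccessTokenExchangeForm(request, data={
#         'access_token': '<provider-issued token>',
#         'client_id': '<public client id>',
#     })
#     if form.is_valid():
#         user = form.cleaned_data['user']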
| agpl-3.0 |
rjenc29/numerical | course/matplotlib/examples/fill_example.py | 1 | 2229 | """
Illustrate different ways of using the various fill functions.
"""
import numpy as np
import matplotlib.pyplot as plt
import example_utils
def main():
fig, axes = example_utils.setup_axes()
fill_example(axes[0])
fill_between_example(axes[1])
stackplot_example(axes[2])
example_utils.title(fig, 'fill/fill_between/stackplot: Filled polygons',
y=0.95)
fig.savefig('fill_example.png', facecolor='none')
plt.show()
def fill_example(ax):
# Use fill when you want a simple filled polygon between vertices
x, y = fill_data()
ax.fill(x, y, color='lightblue')
ax.margins(0.1)
example_utils.label(ax, 'fill')
def fill_between_example(ax):
# fill_between fills the region between two curves, or between a curve
# and a constant value. It can be used in several ways; we'll
# illustrate a few below.
x, y1, y2 = sin_data()
# The most basic (and common) use of fill_between
err = np.random.rand(x.size)**2 + 0.1
y = 0.7 * x + 2
ax.fill_between(x, y + err, y - err, color='orange')
# Filling between two curves with different colors when they cross in
# different directions
ax.fill_between(x, y1, y2, where=y1>y2, color='lightblue')
ax.fill_between(x, y1, y2, where=y1<y2, color='forestgreen')
# Note that this is fillbetween*x*!
ax.fill_betweenx(x, -y1, where=y1>0, color='red', alpha=0.5)
ax.fill_betweenx(x, -y1, where=y1<0, color='blue', alpha=0.5)
ax.margins(0.15)
example_utils.label(ax, 'fill_between/x')
def stackplot_example(ax):
# Stackplot is equivalent to a series of ax.fill_between calls
x, y = stackplot_data()
ax.stackplot(x, y.cumsum(axis=0), alpha=0.5)
example_utils.label(ax, 'stackplot')
#-- Data generation ----------------------
def stackplot_data():
x = np.linspace(0, 10, 100)
y = np.random.normal(0, 1, (5, 100))
y = y.cumsum(axis=1)
y -= y.min(axis=0, keepdims=True)
return x, y
def sin_data():
x = np.linspace(0, 10, 100)
y = np.sin(x)
y2 = np.cos(x)
return x, y, y2
def fill_data():
t = np.linspace(0, 2*np.pi, 100)
r = np.random.normal(0, 1, 100).cumsum()
r -= r.min()
return r * np.cos(t), r * np.sin(t)
main()
| mit |
EDUlib/edx-platform | lms/djangoapps/courseware/services.py | 4 | 1267 | """
Courseware services.
"""
import json
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from lms.djangoapps.courseware.models import StudentModule
from common.djangoapps.student.models import get_user_by_username_or_email
class UserStateService:
"""
User state service to make state accessible in runtime.
"""
def get_state_as_dict(self, username_or_email, block_id):
"""
Return dict containing user state for a given set of parameters.
Arguments:
username_or_email: username or email of the user for whom the data is being retrieved
block_id: string/object representation of the block whose user state is required
Returns:
Returns a dict containing user state, if present, else empty.
"""
try:
user = get_user_by_username_or_email(username_or_email=username_or_email)
except User.DoesNotExist:
return {}
try:
student_module = StudentModule.objects.get(
student=user,
module_state_key=block_id
)
return json.loads(student_module.state)
except StudentModule.DoesNotExist:
return {}
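# Illustrative call (the email and block id are hypothetical; block_id must
# match what StudentModule stores as its module_state_key):
#
#     state = UserStateService().get_state_as_dict(
#         'staff@example.com',
#         'block-v1:edX+DemoX+Demo_Course+type@problem+block@abc123',
#     )
#     # -> a dict of saved state, or {} if the user/state row is missing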
| agpl-3.0 |
seninp/saxpy | saxpy/hotsax.py | 1 | 4860 | """Implements HOT-SAX."""
import numpy as np
from saxpy.znorm import znorm
from saxpy.sax import sax_via_window
from saxpy.distance import euclidean
def find_discords_hotsax(series, win_size=100, num_discords=2, alphabet_size=3,
paa_size=3, znorm_threshold=0.01, sax_type='unidim'):
"""HOT-SAX-driven discords discovery."""
discords = list()
global_registry = set()
# Z-normalized versions for every subsequence.
znorms = np.array([znorm(series[pos: pos + win_size], znorm_threshold) for pos in range(len(series) - win_size + 1)])
# SAX words for every subsequence.
sax_data = sax_via_window(series, win_size=win_size, paa_size=paa_size, alphabet_size=alphabet_size,
nr_strategy=None, znorm_threshold=0.01, sax_type=sax_type)
"""[2.0] build the 'magic' array"""
magic_array = list()
for k, v in sax_data.items():
magic_array.append((k, len(v)))
"""[2.1] sort it ascending by the number of occurrences"""
magic_array = sorted(magic_array, key=lambda tup: tup[1])
while len(discords) < num_discords:
best_discord = find_best_discord_hotsax(series, win_size, global_registry, sax_data, magic_array, znorms)
if -1 == best_discord[0]:
break
discords.append(best_discord)
mark_start = max(0, best_discord[0] - win_size + 1)
mark_end = best_discord[0] + win_size
for i in range(mark_start, mark_end):
global_registry.add(i)
return discords
def find_best_discord_hotsax(series, win_size, global_registry, sax_data, magic_array, znorms):
"""Find the best discord with hotsax."""
"""[3.0] define the key vars"""
best_so_far_position = -1
best_so_far_distance = 0.
distance_calls = 0
visit_array = np.zeros(len(series), dtype=int)  # plain int: np.int is deprecated in newer numpy
"""[4.0] and we are off iterating over the magic array entries"""
for entry in magic_array:
"""[5.0] current SAX words and the number of other sequences mapping to the same SAX word."""
curr_word = entry[0]
occurrences = sax_data[curr_word]
"""[6.0] jumping around by the same word occurrences makes it easier to
nail down the possibly small distance value -- so we can be efficient
and all that..."""
for curr_pos in occurrences:
if curr_pos in global_registry:
continue
"""[7.0] we don't want an overlapping subsequence"""
mark_start = curr_pos - win_size + 1
mark_end = curr_pos + win_size
visit_set = set(range(mark_start, mark_end))
"""[8.0] here is our subsequence in question"""
cur_seq = znorms[curr_pos]
"""[9.0] let's see what is NN distance"""
nn_dist = np.inf
do_random_search = True
"""[10.0] ordered by occurrences search first"""
for next_pos in occurrences:
"""[11.0] skip bad pos"""
if next_pos in visit_set:
continue
else:
visit_set.add(next_pos)
"""[12.0] distance we compute"""
dist = euclidean(cur_seq, znorms[next_pos])
distance_calls += 1
"""[13.0] keep the books up-to-date"""
if dist < nn_dist:
nn_dist = dist
if dist < best_so_far_distance:
do_random_search = False
break
"""[13.0] if not broken above,
we shall proceed with random search"""
if do_random_search:
"""[14.0] build that random visit order array"""
curr_idx = 0
for i in range(0, (len(series) - win_size + 1)):
if not(i in visit_set):
visit_array[curr_idx] = i
curr_idx += 1
it_order = np.random.permutation(visit_array[0:curr_idx])
curr_idx -= 1
"""[15.0] and go random"""
while curr_idx >= 0:
rand_pos = it_order[curr_idx]
curr_idx -= 1
dist = euclidean(cur_seq, znorms[rand_pos])
distance_calls += 1
"""[16.0] keep the books up-to-date again"""
if dist < nn_dist:
nn_dist = dist
if dist < best_so_far_distance:
nn_dist = dist
break
"""[17.0] and BIGGER books"""
if (nn_dist > best_so_far_distance) and (nn_dist < np.inf):
best_so_far_distance = nn_dist
best_so_far_position = curr_pos
return best_so_far_position, best_so_far_distance
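# --- Illustrative usage sketch (not part of the original module) ---
# Runs discord discovery end to end on a synthetic series with an injected
# anomaly; the parameter values are arbitrary.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    series = np.sin(np.linspace(0, 20 * np.pi, 2000))
    series[700:730] += rng.normal(0, 0.5, 30)  # inject an anomaly
    # Each discord is a (position, nearest-neighbour distance) tuple.
    print(find_discords_hotsax(series, win_size=100, num_discords=2))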
| gpl-2.0 |
pedrobaeza/odoo | addons/membership/report/report_membership.py | 55 | 6117 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
import openerp.addons.decimal_precision as dp
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
class report_membership(osv.osv):
'''Membership Analysis'''
_name = 'report.membership'
_description = __doc__
_auto = False
_rec_name = 'year'
_columns = {
'year': fields.char('Year', size=4, readonly=True, select=1),
'month': fields.selection([('01', 'January'), ('02', 'February'), \
('03', 'March'), ('04', 'April'),\
('05', 'May'), ('06', 'June'), \
('07', 'July'), ('08', 'August'),\
('09', 'September'), ('10', 'October'),\
('11', 'November'), ('12', 'December')], 'Month', readonly=True),
'date_from': fields.datetime('Start Date', readonly=True, help="Start membership date"),
'date_to': fields.datetime('End Date', readonly=True, help="End membership date"),
'num_waiting': fields.integer('# Waiting', readonly=True),
'num_invoiced': fields.integer('# Invoiced', readonly=True),
'num_paid': fields.integer('# Paid', readonly=True),
'tot_pending': fields.float('Pending Amount', digits_compute= dp.get_precision('Account'), readonly=True),
'tot_earned': fields.float('Earned Amount', digits_compute= dp.get_precision('Account'), readonly=True),
'partner_id': fields.many2one('res.partner', 'Member', readonly=True),
'associate_member_id': fields.many2one('res.partner', 'Associate Member', readonly=True),
'membership_id': fields.many2one('product.product', 'Membership Product', readonly=True),
'membership_state': fields.selection(STATE, 'Current Membership State', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True)
}
def init(self, cr):
'''Create the view'''
tools.drop_view_if_exists(cr, 'report_membership')
cr.execute("""
CREATE OR REPLACE VIEW report_membership AS (
SELECT
MIN(id) AS id,
partner_id,
user_id,
membership_state,
associate_member_id,
membership_amount,
date_from,
date_to,
year,
month,
COUNT(num_waiting) AS num_waiting,
COUNT(num_invoiced) AS num_invoiced,
COUNT(num_paid) AS num_paid,
SUM(tot_pending) AS tot_pending,
SUM(tot_earned) AS tot_earned,
membership_id,
company_id
FROM
(SELECT
MIN(p.id) AS id,
p.id AS partner_id,
p.user_id AS user_id,
p.membership_state AS membership_state,
p.associate_member AS associate_member_id,
p.membership_amount AS membership_amount,
TO_CHAR(p.membership_start, 'YYYY-MM-DD') AS date_from,
TO_CHAR(p.membership_stop, 'YYYY-MM-DD') AS date_to,
TO_CHAR(p.membership_start, 'YYYY') AS year,
TO_CHAR(p.membership_start,'MM') AS month,
CASE WHEN ml.state = 'waiting' THEN ml.id END AS num_waiting,
CASE WHEN ml.state = 'invoiced' THEN ml.id END AS num_invoiced,
CASE WHEN ml.state = 'paid' THEN ml.id END AS num_paid,
CASE WHEN ml.state IN ('waiting', 'invoiced') THEN SUM(il.price_subtotal) ELSE 0 END AS tot_pending,
CASE WHEN ml.state = 'paid' OR p.membership_state = 'old' THEN SUM(il.price_subtotal) ELSE 0 END AS tot_earned,
ml.membership_id AS membership_id,
p.company_id AS company_id
FROM res_partner p
LEFT JOIN membership_membership_line ml ON (ml.partner = p.id)
LEFT JOIN account_invoice_line il ON (ml.account_invoice_line = il.id)
LEFT JOIN account_invoice ai ON (il.invoice_id = ai.id)
WHERE p.membership_state != 'none' and p.active = 'true'
GROUP BY
p.id,
p.user_id,
p.membership_state,
p.associate_member,
p.membership_amount,
TO_CHAR(p.membership_start, 'YYYY-MM-DD'),
TO_CHAR(p.membership_stop, 'YYYY-MM-DD'),
TO_CHAR(p.membership_start, 'YYYY'),
TO_CHAR(p.membership_start,'MM'),
ml.membership_id,
p.company_id,
ml.state,
ml.id
) AS foo
GROUP BY
year,
month,
date_from,
date_to,
partner_id,
user_id,
membership_id,
company_id,
membership_state,
associate_member_id,
membership_amount
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
urrego093/proyecto_mv | gluon/contrib/pyrtf/Elements.py | 43 | 33090 | from types import IntType, FloatType, LongType, StringTypes
from copy import deepcopy
from binascii import hexlify
from Constants import *
from Styles import *
class UnhandledParamError( Exception ) :
def __init__( self, param ) :
Exception.__init__( self, "Don't know what to do with param %s" % param )
# red green blue
StandardColours = Colours()
StandardColours.append( Colour( 'Black', 0, 0, 0 ) )
StandardColours.append( Colour( 'Blue', 0, 0, 255 ) )
StandardColours.append( Colour( 'Turquoise', 0, 255, 255 ) )
StandardColours.append( Colour( 'Green', 0, 255, 0 ) )
StandardColours.append( Colour( 'Pink', 255, 0, 255 ) )
StandardColours.append( Colour( 'Red', 255, 0, 0 ) )
StandardColours.append( Colour( 'Yellow', 255, 255, 0 ) )
StandardColours.append( Colour( 'White', 255, 255, 255 ) )
StandardColours.append( Colour( 'Blue Dark', 0, 0, 128 ) )
StandardColours.append( Colour( 'Teal', 0, 128, 128 ) )
StandardColours.append( Colour( 'Green Dark', 0, 128, 0 ) )
StandardColours.append( Colour( 'Violet', 128, 0, 128 ) )
StandardColours.append( Colour( 'Red Dark', 128, 0, 0 ) )
StandardColours.append( Colour( 'Yellow Dark', 128, 128, 0 ) )
StandardColours.append( Colour( 'Grey Dark', 128, 128, 128 ) )
StandardColours.append( Colour( 'Grey', 192, 192, 192 ) )
StandardFonts = Fonts()
StandardFonts.append( Font( 'Arial' , 'swiss' , 0, 2, '020b0604020202020204' ) )
StandardFonts.append( Font( 'Arial Black' , 'swiss' , 0, 2, '020b0a04020102020204' ) )
StandardFonts.append( Font( 'Arial Narrow' , 'swiss' , 0, 2, '020b0506020202030204' ) )
StandardFonts.append( Font( 'Bitstream Vera Sans Mono', 'modern', 0, 1, '020b0609030804020204' ) )
StandardFonts.append( Font( 'Bitstream Vera Sans' , 'swiss' , 0, 2, '020b0603030804020204' ) )
StandardFonts.append( Font( 'Bitstream Vera Serif' , 'roman' , 0, 2, '02060603050605020204' ) )
StandardFonts.append( Font( 'Book Antiqua' , 'roman' , 0, 2, '02040602050305030304' ) )
StandardFonts.append( Font( 'Bookman Old Style' , 'roman' , 0, 2, '02050604050505020204' ) )
StandardFonts.append( Font( 'Castellar' , 'roman' , 0, 2, '020a0402060406010301' ) )
StandardFonts.append( Font( 'Century Gothic' , 'swiss' , 0, 2, '020b0502020202020204' ) )
StandardFonts.append( Font( 'Comic Sans MS' , 'script', 0, 2, '030f0702030302020204' ) )
StandardFonts.append( Font( 'Courier New' , 'modern', 0, 1, '02070309020205020404' ) )
StandardFonts.append( Font( 'Franklin Gothic Medium' , 'swiss' , 0, 2, '020b0603020102020204' ) )
StandardFonts.append( Font( 'Garamond' , 'roman' , 0, 2, '02020404030301010803' ) )
StandardFonts.append( Font( 'Georgia' , 'roman' , 0, 2, '02040502050405020303' ) )
StandardFonts.append( Font( 'Haettenschweiler' , 'swiss' , 0, 2, '020b0706040902060204' ) )
StandardFonts.append( Font( 'Impact' , 'swiss' , 0, 2, '020b0806030902050204' ) )
StandardFonts.append( Font( 'Lucida Console' , 'modern', 0, 1, '020b0609040504020204' ) )
StandardFonts.append( Font( 'Lucida Sans Unicode' , 'swiss' , 0, 2, '020b0602030504020204' ) )
StandardFonts.append( Font( 'Microsoft Sans Serif' , 'swiss' , 0, 2, '020b0604020202020204' ) )
StandardFonts.append( Font( 'Monotype Corsiva' , 'script', 0, 2, '03010101010201010101' ) )
StandardFonts.append( Font( 'Palatino Linotype' , 'roman' , 0, 2, '02040502050505030304' ) )
StandardFonts.append( Font( 'Papyrus' , 'script', 0, 2, '03070502060502030205' ) )
StandardFonts.append( Font( 'Sylfaen' , 'roman' , 0, 2, '010a0502050306030303' ) )
StandardFonts.append( Font( 'Symbol' , 'roman' , 2, 2, '05050102010706020507' ) )
StandardFonts.append( Font( 'Tahoma' , 'swiss' , 0, 2, '020b0604030504040204' ) )
StandardFonts.append( Font( 'Times New Roman' , 'roman' , 0, 2, '02020603050405020304' ) )
StandardFonts.append( Font( 'Trebuchet MS' , 'swiss' , 0, 2, '020b0603020202020204' ) )
StandardFonts.append( Font( 'Verdana' , 'swiss' , 0, 2, '020b0604030504040204' ) )
StandardFonts.Castellar.SetAlternate( StandardFonts.Georgia )
"""
Found the following definition at http://www.pbdr.com/vbtips/gen/convtwip.htm
Twips are screen-independent units used to ensure that the placement and
proportion of screen elements in your screen application are the same on all
display systems. A twip is a unit of screen measurement equal to 1/20 of a
printer's point. The conversion between twips and
inches/centimeters/millimeters is as follows:
There are approximately 1440 twips to a inch (the length of a screen item
measuring one inch when printed).
As there are 2.54 centimeters to 1 inch, then there are approximately 567
twips to a centimeter (the length of a screen item measuring one centimeter
when printed).
Or in millimeters, as there are 25.4 millimeters to 1 inch, therefore there
are approximately 56.7 twips to a millimeter (the length of a screen item
measuring one millimeter when printed)."""
# Width default is 12240, Height default is 15840
StandardPaper = Papers()
StandardPaper.append( Paper( 'LETTER' , 1, 'Letter 8 1/2 x 11 in' , 12240, 15840 ) )
StandardPaper.append( Paper( 'LETTERSMALL' , 2, 'Letter Small 8 1/2 x 11 in' , 12240, 15840 ) )
StandardPaper.append( Paper( 'TABLOID' , 3, 'Tabloid 11 x 17 in' , 15840, 24480 ) )
StandardPaper.append( Paper( 'LEDGER' , 4, 'Ledger 17 x 11 in' , 24480, 15840 ) )
StandardPaper.append( Paper( 'LEGAL' , 5, 'Legal 8 1/2 x 14 in' , 12240, 20160 ) )
StandardPaper.append( Paper( 'STATEMENT' , 6, 'Statement 5 1/2 x 8 1/2 in' , 7920, 12240 ) )
StandardPaper.append( Paper( 'EXECUTIVE' , 7, 'Executive 7 1/4 x 10 1/2 in' , 10440, 15120 ) )
StandardPaper.append( Paper( 'A3' , 8, 'A3 297 x 420 mm' , 16838, 23811 ) )
StandardPaper.append( Paper( 'A4' , 9, 'A4 210 x 297 mm' , 11907, 16838 ) )
StandardPaper.append( Paper( 'A4SMALL' , 10, 'A4 Small 210 x 297 mm' , 11907, 16838 ) )
StandardPaper.append( Paper( 'A5' , 11, 'A5 148 x 210 mm' , 8391, 11907 ) )
StandardPaper.append( Paper( 'B4' , 12, 'B4 (JIS) 250 x 354' , 14175, 20072 ) )
StandardPaper.append( Paper( 'B5' , 13, 'B5 (JIS) 182 x 257 mm' , 10319, 14572 ) )
StandardPaper.append( Paper( 'FOLIO' , 14, 'Folio 8 1/2 x 13 in' , 12240, 18720 ) )
StandardPaper.append( Paper( 'QUARTO' , 15, 'Quarto 215 x 275 mm' , 12191, 15593 ) )
StandardPaper.append( Paper( '10X14' , 16, '10x14 in' , 14400, 20160 ) )
StandardPaper.append( Paper( '11X17' , 17, '11x17 in' , 15840, 24480 ) )
StandardPaper.append( Paper( 'NOTE' , 18, 'Note 8 1/2 x 11 in' , 12240, 15840 ) )
StandardPaper.append( Paper( 'ENV_9' , 19, 'Envelope #9 3 7/8 x 8 7/8' , 5580, 12780 ) )
StandardPaper.append( Paper( 'ENV_10' , 20, 'Envelope #10 4 1/8 x 9 1/2' , 5940, 13680 ) )
StandardPaper.append( Paper( 'ENV_11' , 21, 'Envelope #11 4 1/2 x 10 3/8' , 6480, 14940 ) )
StandardPaper.append( Paper( 'ENV_12' , 22, 'Envelope #12 4 3/4 x 11' , 6840, 15840 ) )
StandardPaper.append( Paper( 'ENV_14' , 23, 'Envelope #14 5 x 11 1/2' , 7200, 16560 ) )
StandardPaper.append( Paper( 'CSHEET' , 24, 'C size sheet 18 x 24 in' , 29520, 34560 ) )
StandardPaper.append( Paper( 'DSHEET' , 25, 'D size sheet 22 x 34 in' , 31680, 48960 ) )
StandardPaper.append( Paper( 'ESHEET' , 26, 'E size sheet 34 x 44 in' , 48960, 63360 ) )
StandardPaper.append( Paper( 'ENV_DL' , 27, 'Envelope DL 110 x 220mm' , 6237, 12474 ) )
StandardPaper.append( Paper( 'ENV_C5' , 28, 'Envelope C5 162 x 229 mm' , 9185, 12984 ) )
StandardPaper.append( Paper( 'ENV_C3' , 29, 'Envelope C3 324 x 458 mm' , 18371, 25969 ) )
StandardPaper.append( Paper( 'ENV_C4' , 30, 'Envelope C4 229 x 324 mm' , 12984, 18371 ) )
StandardPaper.append( Paper( 'ENV_C6' , 31, 'Envelope C6 114 x 162 mm' , 6464, 9185 ) )
StandardPaper.append( Paper( 'ENV_C65' , 32, 'Envelope C65 114 x 229 mm' , 6464, 12984 ) )
StandardPaper.append( Paper( 'ENV_B4' , 33, 'Envelope B4 250 x 353 mm' , 14175, 20015 ) )
StandardPaper.append( Paper( 'ENV_B5' , 34, 'Envelope B5 176 x 250 mm' , 9979, 14175 ) )
StandardPaper.append( Paper( 'ENV_B6' , 35, 'Envelope B6 176 x 125 mm' , 9979, 7088 ) )
StandardPaper.append( Paper( 'ENV_ITALY' , 36, 'Envelope 110 x 230 mm' , 6237, 13041 ) )
StandardPaper.append( Paper( 'ENV_MONARCH' , 37, 'Envelope Monarch 3.875 x 7.5 in' , 5580, 10800 ) )
StandardPaper.append( Paper( 'ENV_PERSONAL' , 38, '6 3/4 Envelope 3 5/8 x 6 1/2 in' , 5220, 9360 ) )
StandardPaper.append( Paper( 'FANFOLD_US' , 39, 'US Std Fanfold 14 7/8 x 11 in' , 21420, 15840 ) )
StandardPaper.append( Paper( 'FANFOLD_STD_GERMAN' , 40, 'German Std Fanfold 8 1/2 x 12 in' , 12240, 17280 ) )
StandardPaper.append( Paper( 'FANFOLD_LGL_GERMAN' , 41, 'German Legal Fanfold 8 1/2 x 13 in' , 12240, 18720 ) )
#
# Finally a StyleSheet in which all of this stuff is put together
#
class StyleSheet :
def __init__( self, colours=None, fonts=None ) :
self.Colours = colours or deepcopy( StandardColours )
self.Fonts = fonts or deepcopy( StandardFonts )
self.TextStyles = AttributedList()
self.ParagraphStyles = AttributedList()
class Section( list ) :
NONE = 1
COLUMN = 2
PAGE = 3
EVEN = 4
ODD = 5
BREAK_TYPES = [ NONE, COLUMN, PAGE, EVEN, ODD ]
def __init__( self, paper=None, margins=None, break_type=None, headery=None, footery=None, landscape=None, first_page_number=None ) :
super( Section, self ).__init__()
self.Paper = paper or StandardPaper.A4
self.SetMargins( margins )
self.Header = []
self.Footer = []
self.FirstHeader = []
self.FirstFooter = []
self.SetBreakType( break_type or self.NONE )
self.SetHeaderY( headery )
self.SetFooterY( footery )
self.SetLandscape( landscape )
self.SetFirstPageNumber( first_page_number )
def TwipsToRightMargin( self ) :
return self.Paper.Width - ( self.Margins.Left + self.Margins.Right )
def SetMargins( self, value ) :
self.Margins = value or MarginsPropertySet( top=1000, left=1200, bottom=1000, right=1200 )
self.Width = self.Paper.Width - ( self.Margins.Left + self.Margins.Right )
def SetBreakType( self, value ) :
assert value in self.BREAK_TYPES
self.BreakType = value
return self
def SetHeaderY( self, value ) :
self.HeaderY = value
return self
def SetFooterY( self, value ) :
self.FooterY = value
return self
def SetLandscape( self, value ) :
self.Landscape = False
if value : self.Landscape = True
return self
def SetFirstPageNumber( self, value ) :
self.FirstPageNumber = value
return self
def MakeDefaultStyleSheet( ) :
result = StyleSheet()
NormalText = TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) )
ps = ParagraphStyle( 'Normal',
NormalText.Copy(),
ParagraphPropertySet( space_before = 60,
space_after = 60 ) )
result.ParagraphStyles.append( ps )
ps = ParagraphStyle( 'Normal Short',
NormalText.Copy() )
result.ParagraphStyles.append( ps )
NormalText.TextPropertySet.SetSize( 32 )
ps = ParagraphStyle( 'Heading 1',
NormalText.Copy(),
ParagraphPropertySet( space_before = 240,
space_after = 60 ) )
result.ParagraphStyles.append( ps )
NormalText.TextPropertySet.SetSize( 24 ).SetBold( True )
ps = ParagraphStyle( 'Heading 2',
NormalText.Copy(),
ParagraphPropertySet( space_before = 240,
space_after = 60 ) )
result.ParagraphStyles.append( ps )
# Add some more styles that are based on the normal template but
# have indents set, making them suitable for numbered lists.
normal_numbered = result.ParagraphStyles.Normal.Copy()
normal_numbered.SetName( 'Normal Numbered' )
normal_numbered.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 )
normal_numbered.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH )
result.ParagraphStyles.append( normal_numbered )
normal_numbered2 = result.ParagraphStyles.Normal.Copy()
normal_numbered2.SetName( 'Normal Numbered 2' )
normal_numbered2.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 )
normal_numbered2.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH * 2 )
result.ParagraphStyles.append( normal_numbered2 )
## LIST STYLES
for idx, indent in [ (1, TabPS.DEFAULT_WIDTH ),
(2, TabPS.DEFAULT_WIDTH * 2),
(3, TabPS.DEFAULT_WIDTH * 3) ] :
ps = ParagraphStyle( 'List %s' % idx,
TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) ),
ParagraphPropertySet( space_before = 60,
space_after = 60,
first_line_indent = -indent,
left_indent = indent) )
result.ParagraphStyles.append( ps )
return result
class TAB : pass
class LINE : pass
class RawCode :
def __init__( self, data ) :
self.Data = data
PAGE_NUMBER = RawCode( r'{\field{\fldinst page}}' )
TOTAL_PAGES = RawCode( r'{\field{\fldinst numpages}}' )
SECTION_PAGES = RawCode( r'{\field{\fldinst sectionpages}}' )
ARIAL_BULLET = RawCode( r'{\f2\'95}' )
def _get_jpg_dimensions( fin ):
"""
converted from: http://dev.w3.org/cvsweb/Amaya/libjpeg/rdjpgcom.c?rev=1.2
"""
M_SOF0 = chr( 0xC0 ) # /* Start Of Frame N */
M_SOF1 = chr( 0xC1 ) # /* N indicates which compression process */
M_SOF2 = chr( 0xC2 ) # /* Only SOF0-SOF2 are now in common use */
M_SOF3 = chr( 0xC3 ) #
M_SOF5 = chr( 0xC5 ) # /* NB: codes C4 and CC are NOT SOF markers */
M_SOF6 = chr( 0xC6 ) #
M_SOF7 = chr( 0xC7 ) #
M_SOF9 = chr( 0xC9 ) #
M_SOF10 = chr( 0xCA ) #
M_SOF11 = chr( 0xCB ) #
M_SOF13 = chr( 0xCD ) #
M_SOF14 = chr( 0xCE ) #
M_SOF15 = chr( 0xCF ) #
M_SOI = chr( 0xD8 ) # /* Start Of Image (beginning of datastream) */
M_EOI = chr( 0xD9 ) # /* End Of Image (end of datastream) */
M_FF = chr( 0xFF )
MARKERS = [ M_SOF0, M_SOF1, M_SOF2, M_SOF3,
M_SOF5, M_SOF6, M_SOF7, M_SOF9,
M_SOF10,M_SOF11, M_SOF13, M_SOF14,
M_SOF15 ]
def get_length() :
b1 = fin.read( 1 )
b2 = fin.read( 1 )
return (ord(b1) << 8) + ord(b2)
def next_marker() :
# markers come straight after an 0xFF so skip everything
# up to the first 0xFF that we find
while fin.read(1) != M_FF :
pass
# there can be more than one 0xFF as they can be used
# for padding so we are now looking for the first byte
# that isn't an 0xFF, this will be the marker
while True :
result = fin.read(1)
if result != M_FF :
return result
raise Exception( 'Invalid JPEG' )
# BODY OF THE FUNCTION
if not ((fin.read(1) == M_FF) and (fin.read(1) == M_SOI)) :
raise Exception( 'Invalid Jpeg' )
while True :
marker = next_marker()
# the marker is always followed by two bytes representing the length of the data field
length = get_length ()
if length < 2 : raise Exception( "Erroneous JPEG marker length" )
# if it is a compression process marker then it will contain the dimension of the image
if marker in MARKERS :
# the next byte is the data precision, just skip it
fin.read(1)
# bingo
image_height = get_length()
image_width = get_length()
return image_width, image_height
# just skip whatever data it contains
fin.read( length - 2 )
raise Exception( 'Invalid JPEG, end of stream reached' )
_PNG_HEADER = '\x89\x50\x4e'
def _get_png_dimensions( data ) :
if data[0:3] != _PNG_HEADER :
raise Exception( 'Invalid PNG image' )
width = (ord(data[18]) * 256) + (ord(data[19]))
height = (ord(data[22]) * 256) + (ord(data[23]))
return width, height
def _get_emf_dimensions( fin ):
import struct
def get_DWORD():
return struct.unpack("<L",fin.read(4))[0]
def get_LONG():
return struct.unpack("<l",fin.read(4))[0]
def get_WORD():
return struct.unpack("<H",fin.read(2))[0]
class Empty:
pass
header = Empty()
header.RecordType = get_DWORD() # Record type
header.RecordSize = get_DWORD() # Size of the record in bytes
header.BoundsLeft = get_LONG() # Left inclusive bounds
header.BoundsTop = get_LONG() # Top inclusive bounds
header.BoundsRight = get_LONG() # Right inclusive bounds
header.BoundsBottom = get_LONG() # Bottom inclusive bounds
header.FrameLeft = get_LONG() # Left side of inclusive picture frame
header.FrameTop = get_LONG() # Top side of inclusive picture frame
header.FrameRight = get_LONG() # Right side of inclusive picture frame
header.FrameBottom = get_LONG() # Bottom side of inclusive picture frame
header.Signature = get_DWORD() # Signature ID (always 0x464D4520)
header.Version = get_DWORD() # Version of the metafile
header.Size = get_DWORD() # Size of the metafile in bytes
header.NumOfRecords = get_DWORD() # Number of records in the metafile
header.NumOfHandles = get_WORD() # Number of handles in the handle table
header.Reserved = get_WORD() # Not used (always 0)
header.SizeOfDescrip = get_DWORD() # Size of description string in WORDs
header.OffsOfDescrip = get_DWORD() # Offset of description string in metafile
header.NumPalEntries = get_DWORD() # Number of color palette entries
header.WidthDevPixels = get_LONG() # Width of reference device in pixels
header.HeightDevPixels = get_LONG() # Height of reference device in pixels
header.WidthDevMM = get_LONG() # Width of reference device in millimeters
header.HeightDevMM = get_LONG() # Height of reference device in millimeters
if 0:
klist = header.__dict__.keys()
klist.sort()
for k in klist:
print "%20s:%s" % (k,header.__dict__[k])
dw = header.FrameRight-header.FrameLeft
dh = header.FrameBottom-header.FrameTop
# convert from 0.01mm units to 1/72in units
return int(dw * 72.0/2540.0), int(dh * 72.0/2540.0)
class Image( RawCode ) :
# The width and height must be written out in twips; Word XP
# crashes when these values are missing. Still working out the
# most efficient way of obtaining them.
# \picscalex100\picscaley100\piccropl0\piccropr0\piccropt0\piccropb0
# picwgoal900\pichgoal281
PNG_LIB = 'pngblip'
JPG_LIB = 'jpegblip'
EMF_LIB = 'emfblip'
PICT_TYPES = { 'png' : PNG_LIB,
'jpg' : JPG_LIB,
'emf' : EMF_LIB}
def __init__( self, infile, **kwargs ) :
if hasattr( infile, 'read' ):
fin = infile
if 'datatype' not in kwargs.keys():
msg = "If passing in a file object, you must also specify type='xxx' where xxx is one of %s" % self.PICT_TYPES.keys()
raise ValueError,msg
file_name = kwargs.pop('datatype')
else:
fin = file( infile, 'rb' )
file_name = infile
pict_type = self.PICT_TYPES[ file_name[ -3 : ].lower() ]
if pict_type == self.PNG_LIB :
width, height = _get_png_dimensions( fin.read( 100 ) )
elif pict_type == self.JPG_LIB :
width, height = _get_jpg_dimensions( fin )
elif pict_type == self.EMF_LIB :
width, height = _get_emf_dimensions( fin )
# if user specified height or width but not both, then
# scale unspecified dimension to maintain aspect ratio
if ('width' in kwargs) and ('height' not in kwargs):
height = int(height * float(kwargs['width'])/width)
elif ('height' in kwargs) and ('width' not in kwargs):
width = int(width * float(kwargs['height'])/height)
width = kwargs.pop('width',width)
height = kwargs.pop('height', height)
codes = [ pict_type,
'picwgoal%s' % (width * 20),
'pichgoal%s' % (height * 20) ]
# let user specify global scaling
scale = kwargs.pop('scale',100)
for kwarg, code, default in [ ( 'scale_x', 'scalex', scale ),
( 'scale_y', 'scaley', scale ),
( 'crop_left', 'cropl', '0' ),
( 'crop_right', 'cropr', '0' ),
( 'crop_top', 'cropt', '0' ),
( 'crop_bottom', 'cropb', '0' ) ] :
codes.append( 'pic%s%s' % ( code, kwargs.pop( kwarg, default ) ) )
# reset back to the start of the file to get all of it and now
# turn it into hex.
fin.seek( 0, 0 )
image = hexlify( fin.read() )
fin.close()
data = []
for i in range( 0, len( image ), 128 ) :
data.append( image[ i : i + 128 ] )
data = r'{\pict{\%s}%s}' % ( '\\'.join( codes ), '\n'.join( data ) )
RawCode.__init__( self, data )
def ToRawCode( self, var_name ) :
return '%s = RawCode( """%s""" )' % ( var_name, self.Data )
class Text :
def __init__( self, *params ) :
self.Data = None
self.Style = None
self.Properties = None
self.Shading = None
for param in params :
if isinstance( param, TextStyle ) : self.Style = param
elif isinstance( param, TextPS ) : self.Properties = param
elif isinstance( param, ShadingPS ) : self.Shading = param
else :
# otherwise let the rendering custom handler sort it out itself
self.Data = param
def SetData( self, value ) :
self.Data = value
class Inline( list ) :
def __init__( self, *params ) :
super( Inline, self ).__init__()
self.Style = None
self.Properties = None
self.Shading = None
self._append = super( Inline, self ).append
for param in params :
if isinstance( param, TextStyle ) : self.Style = param
elif isinstance( param, TextPS ) : self.Properties = param
elif isinstance( param, ShadingPS ) : self.Shading = param
else :
# otherwise we add to it to our list of elements and let
# the rendering custom handler sort it out itself.
self.append( param )
def append( self, *params ) :
# filter out any that are explicitly None
[ self._append( param ) for param in params if param is not None ]
class Paragraph( list ) :
def __init__( self, *params ) :
super( Paragraph, self ).__init__()
self.Style = None
self.Properties = None
self.Frame = None
self.Shading = None
self._append = super( Paragraph, self ).append
for param in params :
if isinstance( param, ParagraphStyle ) : self.Style = param
elif isinstance( param, ParagraphPS ) : self.Properties = param
elif isinstance( param, FramePS ) : self.Frame = param
elif isinstance( param, ShadingPS ) : self.Shading = param
else :
# otherwise we add to it to our list of elements and let
# the rendering custom handler sort it out itself.
self.append( param )
def append( self, *params ) :
# filter out any that are explicitly None
[ self._append( param ) for param in params if param is not None ]
def insert( self, index, value ) :
if value is not None :
super( Paragraph, self ).insert( index, value )
class Table :
LEFT = 1
RIGHT = 2
CENTER = 3
ALIGNMENT = [ LEFT, RIGHT, CENTER ]
NO_WRAPPING = 1
WRAP_AROUND = 2
WRAPPING = [ NO_WRAPPING, WRAP_AROUND ]
# \trrh sets the row height: 0 means adjust automatically; use a
# negative value for an absolute height. \trgaph is half of the
# space between table cells; reduce it to get a really tiny column.
def __init__( self, *column_widths, **kwargs ) :
self.Rows = []
self.SetAlignment ( kwargs.pop( 'alignment', self.LEFT ) )
self.SetLeftOffset ( kwargs.pop( 'left_offset', None ) )
self.SetGapBetweenCells( kwargs.pop( 'gap_between_cells', None ) )
self.SetColumnWidths ( *column_widths )
assert not kwargs, 'invalid keyword args %s' % kwargs
def SetAlignment( self, value ) :
assert value is None or value in self.ALIGNMENT
self.Alignment = value or self.LEFT
return self
def SetLeftOffset( self, value ) :
self.LeftOffset = value
return self
def SetGapBetweenCells( self, value ) :
self.GapBetweenCells = value
return self
def SetColumnWidths( self, *column_widths ) :
self.ColumnWidths = column_widths
self.ColumnCount = len( column_widths )
return self
def AddRow( self, *cells ) :
height = None
if isinstance( cells[ 0 ], (IntType, FloatType, LongType) ):
height = int( cells[ 0 ] )
cells = cells[ 1 : ]
# make sure all of the spans add up to the number of columns
# otherwise the table will get corrupted
if self.ColumnCount != sum( [ cell.Span for cell in cells ] ) :
raise Exception( 'ColumnCount != the total of this row\'s cell.Spans.' )
self.Rows.append( ( height, cells ) )
append = AddRow
class Cell( list ) :
"""
\clvertalt Text is top-aligned in cell (the default).
\clvertalc Text is centered vertically in cell.
\clvertalb Text is bottom-aligned in cell.
\cltxlrtb Vertical text aligned left (direction bottom up).
\cltxtbrl Vertical text aligned right (direction top down).
"""
ALIGN_TOP = 1
ALIGN_CENTER = 2
ALIGN_BOTTOM = 3
FLOW_LR_TB = 1
FLOW_RL_TB = 2
FLOW_LR_BT = 3
FLOW_VERTICAL_LR_TB = 4
FLOW_VERTICAL_TB_RL = 5
def __init__( self, *params, **kwargs ) :
super( Cell, self ).__init__()
self.SetFrame ( None )
self.SetMargins( None )
self.SetAlignment( kwargs.get( 'alignment', self.ALIGN_TOP ) )
self.SetFlow ( kwargs.get( 'flow' , self.FLOW_LR_TB ) )
self.SetSpan ( kwargs.get( 'span', 1 ) )
self.SetStartVerticalMerge( kwargs.get( 'start_vertical_merge', False ) )
self.SetVerticalMerge ( kwargs.get( 'vertical_merge', False ) )
self._append = super( Cell, self ).append
for param in params :
if isinstance( param, StringTypes ) : self.append ( param )
elif isinstance( param, Paragraph ) : self.append ( param )
elif isinstance( param, FramePS ) : self.SetFrame ( param )
elif isinstance( param, MarginsPS ) : self.SetMargins( param )
def SetFrame( self, value ) :
self.Frame = value
return self
def SetMargins( self, value ) :
self.Margins = value
return self
def SetAlignment( self, value ) :
assert value in [ self.ALIGN_TOP, self.ALIGN_CENTER, self.ALIGN_BOTTOM ] #, self.ALIGN_TEXT_TOP_DOWN, self.ALIGN_TEXT_BOTTOM_UP ]
self.Alignment = value
def SetFlow( self, value ) :
assert value in [ self.FLOW_LR_TB, self.FLOW_RL_TB, self.FLOW_LR_BT, self.FLOW_VERTICAL_LR_TB, self.FLOW_VERTICAL_TB_RL ]
self.Flow = value
def SetSpan( self, value ) :
# must be a positive integer
self.Span = int( max( value, 1 ) )
return self
def SetStartVerticalMerge( self, value ) :
self.StartVerticalMerge = False
if value :
self.StartVerticalMerge = True
return self
def SetVerticalMerge( self, value ) :
self.VerticalMerge = False
if value :
self.VerticalMerge = True
return self
def append( self, *params ) :
[ self._append( param ) for param in params ]
class Document :
def __init__( self, style_sheet=None, default_language=None, view_kind=None, view_zoom_kind=None, view_scale=None ) :
self.StyleSheet = style_sheet or MakeDefaultStyleSheet()
self.Sections = AttributedList( Section )
self.SetTitle( None )
self.DefaultLanguage = default_language or Languages.DEFAULT
self.ViewKind = view_kind or ViewKind.DEFAULT
self.ViewZoomKind = view_zoom_kind
self.ViewScale = view_scale
def NewSection( self, *params, **kwargs ) :
result = Section( *params, **kwargs )
self.Sections.append( result )
return result
def SetTitle( self, value ) :
self.Title = value
return self
def Copy( self ) :
result = Document( style_sheet = self.StyleSheet.Copy(),
default_language = self.DefaultLanguage,
view_kind = self.ViewKind,
view_zoom_kind = self.ViewZoomKind,
view_scale = self.ViewScale )
result.SetTitle( self.Title )
result.Sections = self.Sections.Copy()
return result
def TEXT( *params, **kwargs ) :
text_props = TextPropertySet()
text_props.SetFont ( kwargs.get( 'font', None ) )
text_props.SetSize ( kwargs.get( 'size', None ) )
text_props.SetBold ( kwargs.get( 'bold', False ) )
text_props.SetItalic ( kwargs.get( 'italic', False ) )
text_props.SetUnderline( kwargs.get( 'underline', False ) )
text_props.SetColour ( kwargs.get( 'colour', None ) )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
apply( result.append, params )
return result
def B( *params ) :
text_props = TextPropertySet( bold=True )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
apply( result.append, params )
return result
def I( *params ) :
text_props = TextPropertySet( italic=True )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
apply( result.append, params )
return result
def U( *params ) :
text_props = TextPropertySet( underline=True )
if len( params ) == 1 :
return Text( params[ 0 ], text_props )
result = Inline( text_props )
apply( result.append, params )
return result
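# --- Illustrative usage sketch (not part of the original module) ---
# Builds a one-paragraph document using only the classes defined above;
# serialising to .rtf is omitted because the renderer lives in a separate
# module of the package.
if __name__ == '__main__' :
    doc = Document()
    ss = doc.StyleSheet
    section = doc.NewSection()
    p = Paragraph( ss.ParagraphStyles.Normal )
    p.append( 'Plain, ', B( 'bold' ), ' and ', I( 'italic' ), ' text.' )
    section.append( p )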
| gpl-3.0 |
prop/titanium_mobile | support/common/csspacker.py | 108 | 2193 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# CSS Minification Script
# http://www.siafoo.net/snippet/16
# released in public domain
#
from __future__ import division
import sys
import getopt
import re
import string
def compress(input):
output = ''
while True:
open_c = input.find('/*')
if open_c == -1:
output += input
break;
output += input[ : open_c]
input = input[ open_c + 2 : ]
close_c = input.find('*/')
if close_c == -1:
#print 'Runaway comment detected'
return output
input = input[close_c + 2: ]
# Replace tab with space
output = output.replace('\t', ' ')
# Remove double spaces
output = re.sub('\s{2,}', ' ', output)
# Remove spaces around separators
output = re.sub('\s*;+\s*', ';', output)
output = re.sub('\s*:\s*', ':', output)
output = re.sub('\s*{\s*', '{', output)
output = re.sub('\s*}\s*', '}', output)
# Remove unnecessary semicolon
output = output.replace(';}', '}')
# Put each rule on its own line
output = output.replace('}', '}\n')
output = output.strip()
output = remove_dead(output)
output = shorten_colors(output)
# Remove all the newlines
output = output.replace('\n', '')
return output
def remove_dead(input):
output = ''
for line in input.splitlines(True):
if not re.search('([\.#][\w_]*{})', line):
output += line
return output
def shorten_colors(input):
output = ''
p = re.compile(':#([A-Fa-f0-9]{6})')
for line in input.splitlines(True):
m = p.search(line)
if m is not None:
old_c = m.group(1)
if old_c[0] == old_c[1] and old_c[2] == old_c[3] and old_c[4] == old_c[5]:
new_c = old_c[0] + old_c[2] + old_c[4]
output += line.replace(old_c, new_c)
continue
output += line
return output
class CSSPacker(object):
def __init__(self,contents):
self.contents = contents
def pack(self):
return compress(self.contents)
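# Illustrative usage (the sample CSS is made up):
if __name__ == '__main__':
    sample = """
    /* layout */
    .box {
        color : #FFFFFF ;
        margin : 0 ;
    }
    """
    # Expected output along the lines of: .box{color:#FFF;margin:0}
    print CSSPacker(sample).pack()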
| apache-2.0 |
ausdim/SGS3-JB-U8 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
HailStorm32/Q.bo_stacks | qbo_stereo_anaglyph/hrl_lib/src/hrl_lib/msg/_Pose3DOF.py | 1 | 5992 | """autogenerated by genpy from hrl_lib/Pose3DOF.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class Pose3DOF(genpy.Message):
_md5sum = "646ead44a0e6fecf4e14ca116f12b08b"
_type = "hrl_lib/Pose3DOF"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
float64 x
float64 y
float64 theta
float64 dt
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.secs: seconds (stamp_secs) since epoch
# * stamp.nsecs: nanoseconds since stamp_secs
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
"""
__slots__ = ['header','x','y','theta','dt']
_slot_types = ['std_msgs/Header','float64','float64','float64','float64']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommended
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,x,y,theta,dt
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Pose3DOF, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.x is None:
self.x = 0.
if self.y is None:
self.y = 0.
if self.theta is None:
self.theta = 0.
if self.dt is None:
self.dt = 0.
else:
self.header = std_msgs.msg.Header()
self.x = 0.
self.y = 0.
self.theta = 0.
self.dt = 0.
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_4d.pack(_x.x, _x.y, _x.theta, _x.dt))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 32
(_x.x, _x.y, _x.theta, _x.dt,) = _struct_4d.unpack(str[start:end])
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_3I = struct.Struct("<3I")
_struct_4d = struct.Struct("<4d")
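# --- hedged example (added for illustration; not emitted by genpy) ---
# A minimal serialize/deserialize round-trip, guarded so that importing
# this module is unaffected; the field values are arbitrary.
if __name__ == '__main__':
    from io import BytesIO
    _msg = Pose3DOF(x=1.0, y=2.0, theta=0.5, dt=0.01)
    _buff = BytesIO()
    _msg.serialize(_buff)
    _copy = Pose3DOF().deserialize(_buff.getvalue())
    assert (_copy.x, _copy.y, _copy.theta, _copy.dt) == (1.0, 2.0, 0.5, 0.01)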
| lgpl-2.1 |
awanke/weave | test/scheduler/main.py | 11 | 3831 | import collections
import json
import logging
import operator
import re
import flask
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
app = flask.Flask('scheduler')
app.debug = True
# We use an exponential moving average (EWMA) to record
# test run times. A higher alpha discounts historic
# observations faster.
alpha = 0.3
PROJECT = 'positive-cocoa-90213'
ZONE = 'us-central1-a'
class Test(ndb.Model):
total_run_time = ndb.FloatProperty(default=0.) # Not a total, but an EWMA of run times
total_runs = ndb.IntegerProperty(default=0)
class Schedule(ndb.Model):
shards = ndb.JsonProperty()
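# Schedule.shards maps shard index -> list of test names; the keys come
# back as strings after the JSON round-trip, hence shards[str(shard)] below.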
@app.route('/record/<path:test_name>/<runtime>', methods=['POST'])
@ndb.transactional
def record(test_name, runtime):
test = Test.get_by_id(test_name)
if test is None:
test = Test(id=test_name)
test.total_run_time = (test.total_run_time * (1-alpha)) + (float(runtime) * alpha)
test.total_runs += 1
test.put()
return ('', 204)
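# Worked example of the EWMA update above (illustration only): with
# alpha = 0.3, a stored value of 10.0 and a new runtime of 20.0 yield
#   10.0 * (1 - 0.3) + 20.0 * 0.3 == 13.0
# so recent runs dominate the estimate fairly quickly.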
@app.route('/schedule/<test_run>/<int:shard_count>/<int:shard>', methods=['POST'])
def schedule(test_run, shard_count, shard):
# read tests from body
test_names = flask.request.get_json(force=True)['tests']
# first, see if we already have a schedule
schedule_id = "%s-%d" % (test_run, shard_count)
schedule = Schedule.get_by_id(schedule_id)
if schedule is not None:
return flask.json.jsonify(tests=schedule.shards[str(shard)])
# if not, build one with a simple greedy algorithm (longest test first onto the shortest shard)
test_times = ndb.get_multi(ndb.Key(Test, test_name) for test_name in test_names)
def avg(test):
if test is not None:
return test.total_run_time
return 1
test_times = [(test_name, avg(test)) for test_name, test in zip(test_names, test_times)]
test_times_dict = dict(test_times)
test_times.sort(key=operator.itemgetter(1))
shards = {i: [] for i in xrange(shard_count)}
while test_times:
test_name, time = test_times.pop()
# find the shortest shard so far and assign this test to it
s, _ = min(((i, sum(test_times_dict[t] for t in shards[i]))
for i in xrange(shard_count)), key=operator.itemgetter(1))
shards[s].append(test_name)
# atomically insert or retrieve existing schedule
schedule = Schedule.get_or_insert(schedule_id, shards=shards)
return flask.json.jsonify(tests=schedule.shards[str(shard)])
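# Tracing the greedy loop above on a toy input (made-up numbers): with
# times {'a': 5, 'b': 3, 'c': 2} and shard_count = 2, tests are popped
# longest-first, so 'a' -> shard 0 (sum 5), 'b' -> shard 1 (sum 3),
# 'c' -> shard 1 (sum 5), leaving both shards balanced.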
NAME_RE = re.compile(r'^host(?P<index>\d+)-(?P<build>\d+)-(?P<shard>\d+)$')
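# Expected to match VM names like 'host0-1234-2', yielding index='0',
# build='1234', shard='2'; gc() skips instances that don't match.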
@app.route('/tasks/gc')
def gc():
# Get the list of running VMs and pick the build id out of each VM name
credentials = GoogleCredentials.get_application_default()
compute = discovery.build('compute', 'v1', credentials=credentials)
instances = compute.instances().list(project=PROJECT, zone=ZONE).execute()
host_by_build = collections.defaultdict(list)
for instance in instances['items']:
matches = NAME_RE.match(instance['name'])
if matches is None:
continue
host_by_build[int(matches.group('build'))].append(instance['name'])
logging.info("Running VMs by build: %r", host_by_build)
# Get the list of builds, filter down to running builds
result = urlfetch.fetch('https://circleci.com/api/v1/project/weaveworks/weave',
headers={'Accept': 'application/json'})
assert result.status_code == 200
builds = json.loads(result.content)
running = {build['build_num'] for build in builds if build['status'] == 'running'}
logging.info("Runnings builds: %r", running)
# Stop VMs for builds that aren't running
stopped = []
for build, names in host_by_build.iteritems():
if build in running:
continue
for name in names:
stopped.append(name)
logging.info("Stopping VM %s", name)
compute.instances().delete(project=PROJECT, zone=ZONE, instance=name).execute()
return (flask.json.jsonify(running=list(running), stopped=stopped), 200)
| apache-2.0 |