repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
VagrantApe/flaskMicroblog | venv/lib/python2.7/site-packages/jinja2/visitor.py | 1401 | 3316 | # -*- coding: utf-8 -*-
"""
jinja2.visitor
~~~~~~~~~~~~~~
This module implements a visitor for the nodes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from jinja2.nodes import Node
class NodeVisitor(object):
    """Walks the abstract syntax tree and call visitor functions for every
    node found.  The visitor functions may return values which will be
    forwarded by the `visit` method.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node.  So a `TryFinally` node visit function would
    be `visit_TryFinally`.  This behavior can be changed by overriding
    the `get_visitor` function.  If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.
    """

    def get_visitor(self, node):
        """Return the visitor function for this node or `None` if no visitor
        exists for this node.  In that case the generic visit function is
        used instead.
        """
        return getattr(self, 'visit_%s' % node.__class__.__name__, None)

    def visit(self, node, *args, **kwargs):
        """Visit a node by dispatching to its visitor function."""
        visitor = self.get_visitor(node)
        if visitor is None:
            return self.generic_visit(node, *args, **kwargs)
        return visitor(node, *args, **kwargs)

    def generic_visit(self, node, *args, **kwargs):
        """Called if no explicit visitor function exists for a node."""
        for child in node.iter_child_nodes():
            self.visit(child, *args, **kwargs)
class NodeTransformer(NodeVisitor):
    """Walks the abstract syntax tree and allows modifications of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node.  If the return
    value of the visitor function is `None` the node will be removed
    from the previous location otherwise it's replaced with the return
    value.  The return value may be the original node in which case no
    replacement takes place.
    """

    def generic_visit(self, node, *args, **kwargs):
        for field, old_value in node.iter_fields():
            if isinstance(old_value, list):
                replacement = []
                for item in old_value:
                    if isinstance(item, Node):
                        item = self.visit(item, *args, **kwargs)
                        if item is None:
                            # visitor asked for removal
                            continue
                        if not isinstance(item, Node):
                            # visitor returned a sequence of nodes
                            replacement.extend(item)
                            continue
                    replacement.append(item)
                old_value[:] = replacement
            elif isinstance(old_value, Node):
                new_node = self.visit(old_value, *args, **kwargs)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node

    def visit_list(self, node, *args, **kwargs):
        """As transformers may return lists in some places this method
        can be used to enforce a list as return value.
        """
        rv = self.visit(node, *args, **kwargs)
        return rv if isinstance(rv, list) else [rv]
| bsd-3-clause |
evanscottgray/ryu | ryu/contrib/ncclient/operations/retrieve.py | 64 | 3714 | # Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rpc import RPC, RPCReply
from ncclient.xml_ import *
import util
class GetReply(RPCReply):
    """Adds attributes for the *data* element to `RPCReply`."""

    def _parsing_hook(self, root):
        # Locate the *data* child only when the reply carried no errors.
        self._data = root.find(qualify("data")) if not self._errors else None

    @property
    def data_ele(self):
        "*data* element as an :class:`~xml.etree.ElementTree.Element`"
        if not self._parsed:
            self.parse()
        return self._data

    @property
    def data_xml(self):
        "*data* element as an XML string"
        if not self._parsed:
            self.parse()
        return to_xml(self._data)

    data = data_ele
    "Same as :attr:`data_ele`"
class Get(RPC):
    "The *get* RPC."

    REPLY_CLS = GetReply
    "See :class:`GetReply`."

    def request(self, filter=None):
        """Retrieve running configuration and device state information.

        *filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)

        :seealso: :ref:`filter_params`
        """
        # Build the <get> element, attach an optional filter, and send it.
        request = new_ele("get")
        if filter is not None:
            request.append(util.build_filter(filter))
        return self._request(request)
class GetConfig(RPC):
    "The *get-config* RPC."

    REPLY_CLS = GetReply
    "See :class:`GetReply`."

    def request(self, source, filter=None):
        """Retrieve all or part of a specified configuration.

        *source* name of the configuration datastore being queried

        *filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)

        :seealso: :ref:`filter_params`"""
        # <get-config> carries a mandatory <source> and an optional filter.
        request = new_ele("get-config")
        request.append(util.datastore_or_url("source", source, self._assert))
        if filter is not None:
            request.append(util.build_filter(filter))
        return self._request(request)
class Dispatch(RPC):
    "Generic retrieving wrapper"

    REPLY_CLS = GetReply
    "See :class:`GetReply`."

    def request(self, rpc_command, source=None, filter=None):
        """
        *rpc_command* specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)

        *source* name of the configuration datastore being queried

        *filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)

        :seealso: :ref:`filter_params`

        Examples of usage::

            dispatch('clear-arp-table')

        or dispatch element like ::

            xsd_fetch = new_ele('get-xnm-information')
            sub_ele(xsd_fetch, 'type').text="xml-schema"
            sub_ele(xsd_fetch, 'namespace').text="junos-configuration"
            dispatch(xsd_fetch)
        """
        # Accept either a ready-made XML element or a bare tag name.
        req = rpc_command if ET.iselement(rpc_command) else new_ele(rpc_command)
        if source is not None:
            req.append(util.datastore_or_url("source", source, self._assert))
        if filter is not None:
            req.append(util.build_filter(filter))
        return self._request(req)
| apache-2.0 |
minhlongdo/scipy | scipy/sparse/dia.py | 53 | 8994 | """Sparse DIAgonal format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['dia_matrix', 'isspmatrix_dia']
import numpy as np
from .base import isspmatrix, _formats
from .data import _data_matrix
from .sputils import isshape, upcast_char, getdtype, get_index_dtype
from ._sparsetools import dia_matvec
class dia_matrix(_data_matrix):
    """Sparse matrix with DIAgonal storage

    This can be instantiated in several ways:
        dia_matrix(D)
            with a dense matrix

        dia_matrix(S)
            with another sparse matrix S (equivalent to S.todia())

        dia_matrix((M, N), [dtype])
            to construct an empty matrix with shape (M, N),
            dtype is optional, defaulting to dtype='d'.

        dia_matrix((data, offsets), shape=(M, N))
            where the ``data[k,:]`` stores the diagonal entries for
            diagonal ``offsets[k]`` (See example below)

    Attributes
    ----------
    dtype : dtype
        Data type of the matrix
    shape : 2-tuple
        Shape of the matrix
    ndim : int
        Number of dimensions (this is always 2)
    nnz
        Number of nonzero elements
    data
        DIA format data array of the matrix
    offsets
        DIA format offset array of the matrix

    Notes
    -----

    Sparse matrices can be used in arithmetic operations: they support
    addition, subtraction, multiplication, division, and matrix power.

    Examples
    --------

    >>> import numpy as np
    >>> from scipy.sparse import dia_matrix
    >>> dia_matrix((3, 4), dtype=np.int8).toarray()
    array([[0, 0, 0, 0],
           [0, 0, 0, 0],
           [0, 0, 0, 0]], dtype=int8)

    >>> data = np.array([[1, 2, 3, 4]]).repeat(3, axis=0)
    >>> offsets = np.array([0, -1, 2])
    >>> dia_matrix((data, offsets), shape=(4, 4)).toarray()
    array([[1, 0, 3, 0],
           [1, 2, 0, 4],
           [0, 2, 3, 0],
           [0, 0, 3, 4]])

    """

    def __init__(self, arg1, shape=None, dtype=None, copy=False):
        _data_matrix.__init__(self)

        if isspmatrix_dia(arg1):
            # Another DIA matrix: share (or copy) its arrays directly.
            if copy:
                arg1 = arg1.copy()
            self.data = arg1.data
            self.offsets = arg1.offsets
            self.shape = arg1.shape
        elif isspmatrix(arg1):
            # Any other sparse format: convert via todia().
            # NOTE(review): the isspmatrix_dia sub-check here is unreachable,
            # since the first branch already captured DIA inputs.
            if isspmatrix_dia(arg1) and copy:
                A = arg1.copy()
            else:
                A = arg1.todia()
            self.data = A.data
            self.offsets = A.offsets
            self.shape = A.shape
        elif isinstance(arg1, tuple):
            if isshape(arg1):
                # It's a tuple of matrix dimensions (M, N)
                # create empty matrix
                self.shape = arg1   # spmatrix checks for errors here
                self.data = np.zeros((0,0), getdtype(dtype, default=float))
                idx_dtype = get_index_dtype(maxval=max(self.shape))
                self.offsets = np.zeros((0), dtype=idx_dtype)
            else:
                try:
                    # Try interpreting it as (data, offsets)
                    data, offsets = arg1
                except:
                    # NOTE(review): bare except also catches KeyboardInterrupt;
                    # consider `except Exception` upstream.
                    raise ValueError('unrecognized form for dia_matrix constructor')
                else:
                    if shape is None:
                        raise ValueError('expected a shape argument')
                    # data must be 2-D (one row per stored diagonal),
                    # offsets 1-D; atleast_* normalizes scalar/1-D input.
                    self.data = np.atleast_2d(np.array(arg1[0], dtype=dtype, copy=copy))
                    self.offsets = np.atleast_1d(np.array(arg1[1],
                                                          dtype=get_index_dtype(maxval=max(shape)),
                                                          copy=copy))
                    self.shape = shape
        else:
            #must be dense, convert to COO first, then to DIA
            try:
                arg1 = np.asarray(arg1)
            except:
                # NOTE(review): bare except, see note above.
                raise ValueError("unrecognized form for"
                        " %s_matrix constructor" % self.format)
            from .coo import coo_matrix
            A = coo_matrix(arg1, dtype=dtype).todia()
            self.data = A.data
            self.offsets = A.offsets
            self.shape = A.shape

        if dtype is not None:
            self.data = self.data.astype(dtype)

        #check format
        if self.offsets.ndim != 1:
            raise ValueError('offsets array must have rank 1')

        if self.data.ndim != 2:
            raise ValueError('data array must have rank 2')

        if self.data.shape[0] != len(self.offsets):
            raise ValueError('number of diagonals (%d) '
                    'does not match the number of offsets (%d)'
                    % (self.data.shape[0], len(self.offsets)))

        if len(np.unique(self.offsets)) != len(self.offsets):
            raise ValueError('offset array contains duplicate values')

    def __repr__(self):
        nnz = self.getnnz()
        format = self.getformat()
        return "<%dx%d sparse matrix of type '%s'\n" \
               "\twith %d stored elements (%d diagonals) in %s format>" % \
               (self.shape + (self.dtype.type, nnz, self.data.shape[0],
                              _formats[format][1],))

    def getnnz(self):
        """number of nonzero values

        explicit zero values are included in this number
        """
        M,N = self.shape
        nnz = 0
        # Each diagonal at offset k contributes its in-bounds length.
        for k in self.offsets:
            if k > 0:
                nnz += min(M,N-k)
            else:
                nnz += min(M+k,N)
        return int(nnz)

    nnz = property(fget=getnnz)

    def _mul_vector(self, other):
        # Matrix-vector product delegated to the C routine dia_matvec.
        x = other

        y = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
                                                       x.dtype.char))

        L = self.data.shape[1]

        M,N = self.shape

        dia_matvec(M,N, len(self.offsets), L, self.offsets, self.data, x.ravel(), y.ravel())

        return y

    def _mul_multimatrix(self, other):
        # Matrix-matrix product: one matvec per column of `other`.
        return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])

    def _setdiag(self, values, k=0):
        """Set diagonal k from `values` (scalar broadcasts; longer
        sequences are truncated to the diagonal length)."""
        M, N = self.shape

        if values.ndim == 0:
            # broadcast
            values_n = np.inf
        else:
            values_n = len(values)

        if k < 0:
            n = min(M + k, N, values_n)
            min_index = 0
            max_index = n
        else:
            n = min(M, N - k, values_n)
            min_index = k
            max_index = k + n

        if values.ndim != 0:
            # allow also longer sequences
            values = values[:n]

        if k in self.offsets:
            self.data[self.offsets == k, min_index:max_index] = values
        else:
            # New diagonal: grow data by one row, widening columns if needed.
            self.offsets = np.append(self.offsets, self.offsets.dtype.type(k))
            m = max(max_index, self.data.shape[1])
            data = np.zeros((self.data.shape[0]+1, m), dtype=self.data.dtype)
            data[:-1,:self.data.shape[1]] = self.data
            data[-1, min_index:max_index] = values
            self.data = data

    def todia(self,copy=False):
        if copy:
            return self.copy()
        else:
            return self

    def transpose(self):
        num_rows, num_cols = self.shape
        max_dim = max(self.shape)

        # flip diagonal offsets
        offsets = -self.offsets

        # re-align the data matrix
        r = np.arange(len(offsets), dtype=np.intc)[:,None]
        c = np.arange(num_rows, dtype=np.intc) - (offsets % max_dim)[:,None]
        pad_amount = max(0, max_dim-self.data.shape[1])
        data = np.hstack((self.data, np.zeros((self.data.shape[0], pad_amount),
                                              dtype=self.data.dtype)))

        data = data[r,c]
        return dia_matrix((data, offsets), shape=(num_cols,num_rows))

    def tocsr(self):
        #this could be faster
        return self.tocoo().tocsr()

    def tocsc(self):
        #this could be faster
        return self.tocoo().tocsc()

    def tocoo(self):
        num_rows, num_cols = self.shape
        num_offsets, offset_len = self.data.shape
        offset_inds = np.arange(offset_len)

        # For each stored entry, compute its row; mask out entries that fall
        # outside the matrix bounds or are explicit zeros.
        row = offset_inds - self.offsets[:,None]
        mask = (row >= 0)
        mask &= (row < num_rows)
        mask &= (offset_inds < num_cols)
        mask &= (self.data != 0)
        row = row[mask]
        col = np.tile(offset_inds, num_offsets)[mask.ravel()]
        data = self.data[mask]

        from .coo import coo_matrix
        return coo_matrix((data,(row,col)), shape=self.shape)

    # needed by _data_matrix
    def _with_data(self, data, copy=True):
        """Returns a matrix with the same sparsity structure as self,
        but with different data.  By default the structure arrays are copied.
        """
        if copy:
            return dia_matrix((data, self.offsets.copy()), shape=self.shape)
        else:
            return dia_matrix((data,self.offsets), shape=self.shape)
def isspmatrix_dia(x):
    """Return True if *x* is a `dia_matrix` instance, False otherwise."""
    return isinstance(x, dia_matrix)
| bsd-3-clause |
WhireCrow/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/usr/lib/python2.7/zipfile.py | 28 | 54059 | """
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
import io
import re
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
# Public API exported by ``from zipfile import *``.
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
           "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
class BadZipfile(Exception):
    """Raised when a file is not a valid ZIP archive or fails a format check."""
    pass
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile, the zipfile requires ZIP64 extensions
    and those extensions are disabled.
    """
    pass
error = BadZipfile      # The exception raised by this module

# Thresholds above which the ZIP64 extensions are required.
ZIP64_LIMIT = (1 << 31) - 1
ZIP_FILECOUNT_LIMIT = 1 << 16
ZIP_MAX_COMMENT = (1 << 16) - 1

# constants for Zip file compression methods
ZIP_STORED = 0
ZIP_DEFLATED = 8
# Other ZIP compression methods not supported

# Below are some formats and associated data for reading/writing headers using
# the struct module.  The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
#     http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)

# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"
stringEndArchive = "PK\005\006"
sizeEndCentDir = struct.calcsize(structEndArchive)

# Indices into the unpacked end-of-central-directory tuple.
_ECD_SIGNATURE = 0
_ECD_DISK_NUMBER = 1
_ECD_DISK_START = 2
_ECD_ENTRIES_THIS_DISK = 3
_ECD_ENTRIES_TOTAL = 4
_ECD_SIZE = 5
_ECD_OFFSET = 6
_ECD_COMMENT_SIZE = 7
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8
_ECD_LOCATION = 9

# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)

# indexes of entries in the central directory structure
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18

# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)

# Indices into the unpacked local file header tuple.
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11

# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)

# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)

# Indices into the unpacked ZIP64 end-of-central-directory tuple.
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
    """Return True if the open file object *fp* holds a valid
    end-of-central-directory record (i.e. looks like a ZIP archive)."""
    try:
        # file has correct magic number when an end record is found
        return bool(_EndRecData(fp))
    except IOError:
        return False
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.

    The filename argument may be a file or file-like object too.
    """
    try:
        if hasattr(filename, "read"):
            # Already an open file-like object.
            return _check_zipfile(fp=filename)
        with open(filename, "rb") as fp:
            return _check_zipfile(fp)
    except IOError:
        return False
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec
    """
    try:
        # Seek to where the ZIP64 end-of-central-directory locator would be,
        # relative to the end of the file (whence=2).
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except IOError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec

    data = fpin.read(sizeEndCentDir64Locator)
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        # No locator signature: plain (non-ZIP64) archive.
        return endrec

    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")

    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    sig, sz, create_version, read_version, disk_num, disk_dir, \
            dircount, dircount2, dirsize, diroffset = \
            struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec

    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""

    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()

    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except IOError:
        # File too small to hold even the fixed-size end record.
        return None
    data = fpin.read()
    if data[0:4] == stringEndArchive and data[-2:] == "\000\000":
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)

        # Append a blank comment and record start offset
        endrec.append("")
        endrec.append(filesize - sizeEndCentDir)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)

    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
        comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)

        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)

    # Unable to find a valid end of central directory structure
    return
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive."""

    __slots__ = (
            'orig_filename',
            'filename',
            'date_time',
            'compress_type',
            'comment',
            'extra',
            'create_system',
            'create_version',
            'extract_version',
            'reserved',
            'flag_bits',
            'volume',
            'internal_attr',
            'external_attr',
            'header_offset',
            'CRC',
            'compress_size',
            'file_size',
            '_raw_time',
        )

    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.orig_filename = filename   # Original file name in archive

        # Terminate the file name at the first null byte.  Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")

        self.filename = filename        # Normalized file name
        self.date_time = date_time      # year, month, day, hour, min, sec

        # DOS timestamps cannot represent years before 1980.
        if date_time[0] < 1980:
            raise ValueError('ZIP does not support timestamps before 1980')

        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self.comment = ""               # Comment for each file
        self.extra = ""                 # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0          # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3          # System which created ZIP archive
        self.create_version = 20        # Version which created ZIP archive
        self.extract_version = 20       # Version needed to extract archive
        self.reserved = 0               # Must be zero
        self.flag_bits = 0              # ZIP flag bits
        self.volume = 0                 # Volume number of file header
        self.internal_attr = 0          # Internal attributes
        self.external_attr = 0          # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset         Byte offset to the file header
        # CRC                   CRC-32 of the uncompressed file
        # compress_size         Size of the compressed file
        # file_size             Size of the uncompressed file

    def FileHeader(self):
        """Return the per-file header as a string."""
        # Pack the modification time into DOS date/time fields
        # (2-second resolution for seconds).
        dt = self.date_time
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size

        extra = self.extra

        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                    1, struct.calcsize(fmt)-4, file_size, compress_size)
            file_size = 0xffffffff
            compress_size = 0xffffffff
            self.extract_version = max(45, self.extract_version)
            # NOTE(review): create_version is set from extract_version on the
            # next line — matches upstream CPython 2.7, but looks suspicious.
            self.create_version = max(45, self.extract_version)

        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, flag_bits,
                 self.compress_type, dostime, dosdate, CRC,
                 compress_size, file_size,
                 len(filename), len(extra))
        return header + filename + extra

    def _encodeFilenameFlags(self):
        # Encode a unicode filename as ASCII when possible; otherwise use
        # UTF-8 and set the language-encoding flag (bit 11).
        if isinstance(self.filename, unicode):
            try:
                return self.filename.encode('ascii'), self.flag_bits
            except UnicodeEncodeError:
                return self.filename.encode('utf-8'), self.flag_bits | 0x800
        else:
            return self.filename, self.flag_bits

    def _decodeFilename(self):
        # Bit 11 of the flag bits marks a UTF-8 encoded filename.
        if self.flag_bits & 0x800:
            return self.filename.decode('utf-8')
        else:
            return self.filename

    def _decodeExtra(self):
        # Try to decode the extra field.
        extra = self.extra
        unpack = struct.unpack
        while extra:
            # Each extra-field block starts with a 2-byte tag and length.
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError, "Corrupt extra field %s"%(ln,)

                idx = 0

                # ZIP64 extension (large files and/or large archives)
                if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
                    self.file_size = counts[idx]
                    idx += 1

                if self.compress_size == 0xFFFFFFFFL:
                    self.compress_size = counts[idx]
                    idx += 1

                if self.header_offset == 0xffffffffL:
                    old = self.header_offset
                    self.header_offset = counts[idx]
                    idx+=1

            extra = extra[ln+4:]
class _ZipDecrypter:
"""Class to handle decryption of files stored within a ZIP archive.
ZIP supports a password-based form of encryption. Even though known
plaintext attacks have been found against it, it is still useful
to be able to get data out of such a file.
Usage:
zd = _ZipDecrypter(mypwd)
plain_char = zd(cypher_char)
plain_text = map(zd, cypher_text)
"""
def _GenerateCRCTable():
"""Generate a CRC-32 table.
ZIP encryption uses the CRC32 one-byte primitive for scrambling some
internal keys. We noticed that a direct implementation is faster than
relying on binascii.crc32().
"""
poly = 0xedb88320
table = [0] * 256
for i in range(256):
crc = i
for j in range(8):
if crc & 1:
crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
else:
crc = ((crc >> 1) & 0x7FFFFFFF)
table[i] = crc
return table
crctable = _GenerateCRCTable()
def _crc32(self, ch, crc):
"""Compute the CRC32 primitive on one byte."""
return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
def __init__(self, pwd):
self.key0 = 305419896
self.key1 = 591751049
self.key2 = 878082192
for p in pwd:
self._UpdateKeys(p)
def _UpdateKeys(self, c):
self.key0 = self._crc32(c, self.key0)
self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
self.key1 = (self.key1 * 134775813 + 1) & 4294967295
self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
def __call__(self, c):
"""Decrypt a single character."""
c = ord(c)
k = self.key2 | 2
c = c ^ (((k * (k^1)) >> 8) & 255)
c = chr(c)
self._UpdateKeys(c)
return c
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
       Is returned by ZipFile.open().
    """

    # Max size supported by decompressor.
    # NOTE(review): binds as 1 << (31 - 1) == 2**30 due to precedence,
    # not (1 << 31) - 1 — matches upstream CPython 2.7.
    MAX_N = 1 << 31 - 1

    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096

    # Search for universal newlines or line chunks.
    PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')

    def __init__(self, fileobj, mode, zipinfo, decrypter=None):
        self._fileobj = fileobj
        self._decrypter = decrypter

        self._compress_type = zipinfo.compress_type
        self._compress_size = zipinfo.compress_size
        self._compress_left = zipinfo.compress_size

        if self._compress_type == ZIP_DEFLATED:
            # -15: raw deflate stream (no zlib header/trailer).
            self._decompressor = zlib.decompressobj(-15)
        self._unconsumed = ''

        self._readbuffer = ''
        self._offset = 0

        self._universal = 'U' in mode
        self.newlines = None

        # Adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information.
        if self._decrypter is not None:
            self._compress_left -= 12

        self.mode = mode
        self.name = zipinfo.filename

        if hasattr(zipinfo, 'CRC'):
            self._expected_crc = zipinfo.CRC
            self._running_crc = crc32(b'') & 0xffffffff
        else:
            self._expected_crc = None

    def readline(self, limit=-1):
        """Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.
        """

        if not self._universal and limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find('\n', self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset: i]
                self._offset = i
                return line

        if not self._universal:
            return io.BufferedIOBase.readline(self, limit)

        line = ''
        while limit < 0 or len(line) < limit:
            readahead = self.peek(2)
            if readahead == '':
                return line

            #
            # Search for universal newlines or line chunks.
            #
            # The pattern returns either a line chunk or a newline, but not
            # both. Combined with peek(2), we are assured that the sequence
            # '\r\n' is always retrieved completely and never split into
            # separate newlines - '\r', '\n' due to coincidental readaheads.
            #
            match = self.PATTERN.search(readahead)
            newline = match.group('newline')
            if newline is not None:
                if self.newlines is None:
                    self.newlines = []
                if newline not in self.newlines:
                    self.newlines.append(newline)
                self._offset += len(newline)
                return line + '\n'

            chunk = match.group('chunk')
            if limit >= 0:
                chunk = chunk[: limit - len(line)]
            self._offset += len(chunk)
            line += chunk

        return line

    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            chunk = self.read(n)
            # Rewind: leave the freshly-read bytes in the buffer.
            self._offset -= len(chunk)

        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]

    def readable(self):
        return True

    def read(self, n=-1):
        """Read and return up to n bytes.
        If the argument is omitted, None, or negative, data is read and returned until EOF is reached..
        """
        buf = ''
        if n is None:
            n = -1
        while True:
            if n < 0:
                data = self.read1(n)
            elif n > len(buf):
                data = self.read1(n - len(buf))
            else:
                return buf
            if len(data) == 0:
                return buf
            buf += data

    def _update_crc(self, newdata, eof):
        # Update the CRC using the given data.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
        # Check the CRC if we're at the end of the file
        if eof and self._running_crc != self._expected_crc:
            raise BadZipfile("Bad CRC-32 for file %r" % self.name)

    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""

        # Simplify algorithm (branching) by transforming negative n to large n.
        if n < 0 or n is None:
            n = self.MAX_N

        # Bytes available in read buffer.
        len_readbuffer = len(self._readbuffer) - self._offset

        # Read from file.
        if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
            nbytes = n - len_readbuffer - len(self._unconsumed)
            nbytes = max(nbytes, self.MIN_READ_SIZE)
            nbytes = min(nbytes, self._compress_left)

            data = self._fileobj.read(nbytes)
            self._compress_left -= len(data)

            if data and self._decrypter is not None:
                data = ''.join(map(self._decrypter, data))

            if self._compress_type == ZIP_STORED:
                self._update_crc(data, eof=(self._compress_left==0))
                self._readbuffer = self._readbuffer[self._offset:] + data
                self._offset = 0
            else:
                # Prepare deflated bytes for decompression.
                self._unconsumed += data

        # Handle unconsumed data.
        if (len(self._unconsumed) > 0 and n > len_readbuffer and
            self._compress_type == ZIP_DEFLATED):
            data = self._decompressor.decompress(
                self._unconsumed,
                max(n - len_readbuffer, self.MIN_READ_SIZE)
            )

            self._unconsumed = self._decompressor.unconsumed_tail
            eof = len(self._unconsumed) == 0 and self._compress_left == 0
            if eof:
                data += self._decompressor.flush()

            self._update_crc(data, eof=eof)
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0

        # Read from buffer.
        data = self._readbuffer[self._offset: self._offset + n]
        self._offset += len(data)
        return data
class ZipFile:
""" Class with methods to open, read, write, close, list zip files.
z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)
file: Either the path to the file, or a file-like object.
If it is a path, the file will be opened and closed by ZipFile.
mode: The mode can be either read "r", write "w" or append "a".
compression: ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
allowZip64: if True ZipFile will create files with ZIP64 extensions when
needed, otherwise it will raise an exception when this would
be necessary.
"""
fp = None # Set here since __del__ checks it
    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
        """Open the ZIP file with mode read "r", write "w" or append "a"."""
        if mode not in ("r", "w", "a"):
            raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')

        # Validate the compression method up front, before touching the file.
        if compression == ZIP_STORED:
            pass
        elif compression == ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError,\
                      "Compression requires the (missing) zlib module"
        else:
            raise RuntimeError, "That compression method is not supported"

        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}    # Find file info given name
        self.filelist = []      # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        # 'key' is the mode with any 'b' stripped; drives the branches below.
        self.mode = key = mode.replace('b', '')[0]
        self.pwd = None
        self.comment = ''

        # Check if we were passed a file-like object
        if isinstance(file, basestring):
            self._filePassed = 0
            self.filename = file
            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
            try:
                self.fp = open(file, modeDict[mode])
            except IOError:
                if mode == 'a':
                    # Appending to a file that does not exist yet: fall back
                    # to creating a brand-new archive instead.
                    mode = key = 'w'
                    self.fp = open(file, modeDict[mode])
                else:
                    raise
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)

        if key == 'r':
            self._GetContents()
        elif key == 'w':
            # set the modified flag so central directory gets written
            # even if no files are added to the archive
            self._didModify = True
        elif key == 'a':
            try:
                # See if file is a zip file
                self._RealGetContents()
                # seek to start of directory and overwrite
                self.fp.seek(self.start_dir, 0)
            except BadZipfile:
                # file is not a zip file, just append
                self.fp.seek(0, 2)
                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
        else:
            # Unreachable in practice (mode was validated above); kept as a
            # defensive net that also releases any file we opened ourselves.
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise RuntimeError, 'Mode must be "r", "w" or "a"'
    def __enter__(self):
        # Context-manager support: "with ZipFile(...) as zf:".
        return self

    def __exit__(self, type, value, traceback):
        # Always close on context exit, even when an exception occurred.
        self.close()
    def _GetContents(self):
        """Read the directory, making sure we close the file if the format
        is bad."""
        try:
            self._RealGetContents()
        except BadZipfile:
            # Only close file handles we opened ourselves; a caller-supplied
            # file object remains the caller's responsibility.
            if not self._filePassed:
                self.fp.close()
                self.fp = None
            raise
    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file.

        Populates self.filelist and self.NameToInfo from the central
        directory and records self.start_dir.  Raises BadZipfile when the
        end-of-archive record or a central-directory entry is malformed.
        """
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except IOError:
            raise BadZipfile("File is not a zip file")
        if not endrec:
            raise BadZipfile, "File is not a zip file"
        if self.debug > 1:
            print endrec
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self.comment = endrec[_ECD_COMMENT]     # archive comment

        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)

        if self.debug > 2:
            inferred = concat + offset_cd
            print "given, inferred, offset", offset_cd, inferred, concat
        # self.start_dir:  Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        # Slurp the whole central directory and parse it from memory.
        data = fp.read(size_cd)
        fp = cStringIO.StringIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if centdir[0:4] != stringCentralDir:
                raise BadZipfile, "Bad magic number for central directory"
            centdir = struct.unpack(structCentralDir, centdir)
            if self.debug > 2:
                print centdir
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
             x.flag_bits, x.compress_type, t, d,
             x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                            t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )

            x._decodeExtra()
            # Stored offsets are relative to the zip proper; shift by
            # 'concat' when other data precedes the archive.
            x.header_offset = x.header_offset + concat
            x.filename = x._decodeFilename()
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x

            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])

        if self.debug > 2:
            print "total", total
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
def infolist(self):
"""Return a list of class ZipInfo instances for files in the
archive."""
return self.filelist
    def printdir(self):
        """Print a table of contents for the zip file."""
        # Header row, column-aligned with the per-file rows below.
        print "%-46s %19s %12s" % ("File Name", "Modified ", "Size")
        for zinfo in self.filelist:
            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
            print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
    def testzip(self):
        """Read all the files and check the CRC.

        Returns the name of the first member whose CRC check fails, or
        None (implicitly) when every member reads back cleanly.
        """
        chunk_size = 2 ** 20
        for zinfo in self.filelist:
            try:
                # Read by chunks, to avoid an OverflowError or a
                # MemoryError with very large embedded files.
                f = self.open(zinfo.filename, "r")
                while f.read(chunk_size):     # Check CRC-32
                    pass
            except BadZipfile:
                return zinfo.filename
def getinfo(self, name):
"""Return the instance of ZipInfo given 'name'."""
info = self.NameToInfo.get(name)
if info is None:
raise KeyError(
'There is no item named %r in the archive' % name)
return info
    def setpassword(self, pwd):
        """Set default password for encrypted files."""
        # Used by open()/read() when no per-call password is supplied.
        self.pwd = pwd

    def read(self, name, pwd=None):
        """Return file bytes (as a string) for name."""
        # Convenience wrapper: open the member and slurp its full contents.
        return self.open(name, "r", pwd).read()
    def open(self, name, mode="r", pwd=None):
        """Return file-like object for 'name'.

        name may be a member name or a ZipInfo object.  pwd overrides the
        archive-wide password set via setpassword() for this one member.
        Raises BadZipfile on header corruption and RuntimeError on a
        missing/incorrect password.
        """
        if mode not in ("r", "U", "rU"):
            raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to read ZIP archive that was already closed"

        # Only open a new file for instances where we were not
        # given a file object in the constructor
        if self._filePassed:
            zef_file = self.fp
        else:
            zef_file = open(self.filename, 'rb')

        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            # 'name' is already an info object
            zinfo = name
        else:
            # Get info object for name
            zinfo = self.getinfo(name)

        zef_file.seek(zinfo.header_offset, 0)

        # Skip the file header:
        fheader = zef_file.read(sizeFileHeader)
        if fheader[0:4] != stringFileHeader:
            raise BadZipfile, "Bad magic number for file header"

        fheader = struct.unpack(structFileHeader, fheader)
        fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
        if fheader[_FH_EXTRA_FIELD_LENGTH]:
            zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])

        # The local header must agree with the central-directory entry;
        # a mismatch indicates a corrupt or tampered-with archive.
        if fname != zinfo.orig_filename:
            raise BadZipfile, \
                      'File name in directory "%s" and header "%s" differ.' % (
                          zinfo.orig_filename, fname)

        # check for encrypted flag & handle password
        is_encrypted = zinfo.flag_bits & 0x1
        zd = None
        if is_encrypted:
            if not pwd:
                pwd = self.pwd
            if not pwd:
                raise RuntimeError, "File %s is encrypted, " \
                      "password required for extraction" % name

            zd = _ZipDecrypter(pwd)
            # The first 12 bytes in the cypher stream is an encryption header
            # used to strengthen the algorithm. The first 11 bytes are
            # completely random, while the 12th contains the MSB of the CRC,
            # or the MSB of the file time depending on the header type
            # and is used to check the correctness of the password.
            bytes = zef_file.read(12)
            h = map(zd, bytes[0:12])
            if zinfo.flag_bits & 0x8:
                # compare against the file type from extended local headers
                check_byte = (zinfo._raw_time >> 8) & 0xff
            else:
                # compare against the CRC otherwise
                check_byte = (zinfo.CRC >> 24) & 0xff
            if ord(h[11]) != check_byte:
                raise RuntimeError("Bad password for file", name)

        return ZipExtFile(zef_file, mode, zinfo, zd)
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
def _extract_member(self, member, targetpath, pwd):
"""Extract the ZipInfo object 'member' to a physical
file on the path targetpath.
"""
# build the destination pathname, replacing
# forward slashes to platform specific separators.
# Strip trailing path separator, unless it represents the root.
if (targetpath[-1:] in (os.path.sep, os.path.altsep)
and len(os.path.splitdrive(targetpath)[1]) > 1):
targetpath = targetpath[:-1]
# don't include leading "/" from file name if present
if member.filename[0] == '/':
targetpath = os.path.join(targetpath, member.filename[1:])
else:
targetpath = os.path.join(targetpath, member.filename)
targetpath = os.path.normpath(targetpath)
# Create all upper directories if necessary.
upperdirs = os.path.dirname(targetpath)
if upperdirs and not os.path.exists(upperdirs):
os.makedirs(upperdirs)
if member.filename[-1] == '/':
if not os.path.isdir(targetpath):
os.mkdir(targetpath)
return targetpath
source = self.open(member, pwd=pwd)
target = file(targetpath, "wb")
shutil.copyfileobj(source, target)
source.close()
target.close()
return targetpath
    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive.

        Raises RuntimeError for mode/state/compression problems and
        LargeZipFile when ZIP64 records would be needed but were not
        enabled via allowZip64.  Duplicate names only warn (at debug).
        """
        if zinfo.filename in self.NameToInfo:
            if self.debug:      # Warning for duplicate names
                print "Duplicate name:", zinfo.filename
        if self.mode not in ("w", "a"):
            raise RuntimeError, 'write() requires mode "w" or "a"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to write ZIP archive that was already closed"
        if zinfo.compress_type == ZIP_DEFLATED and not zlib:
            raise RuntimeError, \
                  "Compression requires the (missing) zlib module"
        if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
            raise RuntimeError, \
                  "That compression method is not supported"
        # Sizes or offsets past 4 GiB need ZIP64 records, which the caller
        # must have opted into.
        if zinfo.file_size > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
        if zinfo.header_offset > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Zipfile size would require ZIP64 extensions")
    def write(self, filename, arcname=None, compress_type=None):
        """Put the bytes from filename into the archive under the name
        arcname.

        compress_type, when given, overrides the archive-wide default for
        this one member.  Directories are stored as empty entries whose
        name ends in '/'.
        """
        if not self.fp:
            raise RuntimeError(
                  "Attempt to write to ZIP archive that was already closed")

        st = os.stat(filename)
        isdir = stat.S_ISDIR(st.st_mode)
        mtime = time.localtime(st.st_mtime)
        date_time = mtime[0:6]

        # Create ZipInfo instance to store file information
        if arcname is None:
            arcname = filename
        # Normalize the archive name: drop the drive and leading
        # separators so the stored name is always relative.
        arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
        while arcname[0] in (os.sep, os.altsep):
            arcname = arcname[1:]
        if isdir:
            arcname += '/'
        zinfo = ZipInfo(arcname, date_time)
        zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
        if compress_type is None:
            zinfo.compress_type = self.compression
        else:
            zinfo.compress_type = compress_type

        zinfo.file_size = st.st_size
        zinfo.flag_bits = 0x00
        zinfo.header_offset = self.fp.tell()    # Start of header bytes

        self._writecheck(zinfo)
        self._didModify = True

        if isdir:
            # Zero-length directory entry: header only, no data stream.
            zinfo.file_size = 0
            zinfo.compress_size = 0
            zinfo.CRC = 0
            self.filelist.append(zinfo)
            self.NameToInfo[zinfo.filename] = zinfo
            self.fp.write(zinfo.FileHeader())
            return

        with open(filename, "rb") as fp:
            # Must overwrite CRC and sizes with correct data later
            zinfo.CRC = CRC = 0
            zinfo.compress_size = compress_size = 0
            zinfo.file_size = file_size = 0
            self.fp.write(zinfo.FileHeader())
            if zinfo.compress_type == ZIP_DEFLATED:
                cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                     zlib.DEFLATED, -15)
            else:
                cmpr = None
            # Stream the source through in 8 KiB chunks, accumulating the
            # CRC and (possibly compressed) size as we go.
            while 1:
                buf = fp.read(1024 * 8)
                if not buf:
                    break
                file_size = file_size + len(buf)
                CRC = crc32(buf, CRC) & 0xffffffff
                if cmpr:
                    buf = cmpr.compress(buf)
                    compress_size = compress_size + len(buf)
                self.fp.write(buf)
        if cmpr:
            buf = cmpr.flush()
            compress_size = compress_size + len(buf)
            self.fp.write(buf)
            zinfo.compress_size = compress_size
        else:
            zinfo.compress_size = file_size
        zinfo.CRC = CRC
        zinfo.file_size = file_size
        # Seek backwards and write CRC and file sizes
        position = self.fp.tell()       # Preserve current position in file
        self.fp.seek(zinfo.header_offset + 14, 0)
        self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
              zinfo.file_size))
        self.fp.seek(position, 0)
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
"""Write a file into the archive. The contents is the string
'bytes'. 'zinfo_or_arcname' is either a ZipInfo instance or
the name of the file in the archive."""
if not isinstance(zinfo_or_arcname, ZipInfo):
zinfo = ZipInfo(filename=zinfo_or_arcname,
date_time=time.localtime(time.time())[:6])
zinfo.compress_type = self.compression
zinfo.external_attr = 0600 << 16
else:
zinfo = zinfo_or_arcname
if not self.fp:
raise RuntimeError(
"Attempt to write to ZIP archive that was already closed")
if compress_type is not None:
zinfo.compress_type = compress_type
zinfo.file_size = len(bytes) # Uncompressed size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self._writecheck(zinfo)
self._didModify = True
zinfo.CRC = crc32(bytes) & 0xffffffff # CRC-32 checksum
if zinfo.compress_type == ZIP_DEFLATED:
co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
zlib.DEFLATED, -15)
bytes = co.compress(bytes) + co.flush()
zinfo.compress_size = len(bytes) # Compressed size
else:
zinfo.compress_size = zinfo.file_size
zinfo.header_offset = self.fp.tell() # Start of header bytes
self.fp.write(zinfo.FileHeader())
self.fp.write(bytes)
self.fp.flush()
if zinfo.flag_bits & 0x08:
# Write CRC and file sizes after the file data
self.fp.write(struct.pack("<LLL", zinfo.CRC, zinfo.compress_size,
zinfo.file_size))
self.filelist.append(zinfo)
self.NameToInfo[zinfo.filename] = zinfo
    def __del__(self):
        """Call the "close()" method in case the user forgot."""
        # Safe even if __init__ failed early: the class attribute fp = None
        # makes close() a no-op in that case.
        self.close()
def close(self):
"""Close the file, and for mode "w" and "a" write the ending
records."""
if self.fp is None:
return
if self.mode in ("w", "a") and self._didModify: # write ending records
count = 0
pos1 = self.fp.tell()
for zinfo in self.filelist: # write central directory
count = count + 1
dt = zinfo.date_time
dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
extra = []
if zinfo.file_size > ZIP64_LIMIT \
or zinfo.compress_size > ZIP64_LIMIT:
extra.append(zinfo.file_size)
extra.append(zinfo.compress_size)
file_size = 0xffffffff
compress_size = 0xffffffff
else:
file_size = zinfo.file_size
compress_size = zinfo.compress_size
if zinfo.header_offset > ZIP64_LIMIT:
extra.append(zinfo.header_offset)
header_offset = 0xffffffffL
else:
header_offset = zinfo.header_offset
extra_data = zinfo.extra
if extra:
# Append a ZIP64 field to the extra's
extra_data = struct.pack(
'<HH' + 'Q'*len(extra),
1, 8*len(extra), *extra) + extra_data
extract_version = max(45, zinfo.extract_version)
create_version = max(45, zinfo.create_version)
else:
extract_version = zinfo.extract_version
create_version = zinfo.create_version
try:
filename, flag_bits = zinfo._encodeFilenameFlags()
centdir = struct.pack(structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
except DeprecationWarning:
print >>sys.stderr, (structCentralDir,
stringCentralDir, create_version,
zinfo.create_system, extract_version, zinfo.reserved,
zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
zinfo.CRC, compress_size, file_size,
len(zinfo.filename), len(extra_data), len(zinfo.comment),
0, zinfo.internal_attr, zinfo.external_attr,
header_offset)
raise
self.fp.write(centdir)
self.fp.write(filename)
self.fp.write(extra_data)
self.fp.write(zinfo.comment)
pos2 = self.fp.tell()
# Write end-of-zip-archive record
centDirCount = count
centDirSize = pos2 - pos1
centDirOffset = pos1
if (centDirCount >= ZIP_FILECOUNT_LIMIT or
centDirOffset > ZIP64_LIMIT or
centDirSize > ZIP64_LIMIT):
# Need to write the ZIP64 end-of-archive records
zip64endrec = struct.pack(
structEndArchive64, stringEndArchive64,
44, 45, 45, 0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset)
self.fp.write(zip64endrec)
zip64locrec = struct.pack(
structEndArchive64Locator,
stringEndArchive64Locator, 0, pos2, 1)
self.fp.write(zip64locrec)
centDirCount = min(centDirCount, 0xFFFF)
centDirSize = min(centDirSize, 0xFFFFFFFF)
centDirOffset = min(centDirOffset, 0xFFFFFFFF)
# check for valid comment length
if len(self.comment) >= ZIP_MAX_COMMENT:
if self.debug > 0:
msg = 'Archive comment is too long; truncating to %d bytes' \
% ZIP_MAX_COMMENT
self.comment = self.comment[:ZIP_MAX_COMMENT]
endrec = struct.pack(structEndArchive, stringEndArchive,
0, 0, centDirCount, centDirCount,
centDirSize, centDirOffset, len(self.comment))
self.fp.write(endrec)
self.fp.write(self.comment)
self.fp.flush()
if not self._filePassed:
self.fp.close()
self.fp = None
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def writepy(self, pathname, basename = ""):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive.  If pathname is a plain
        directory, listdir *.py and enter all modules.  Else, pathname
        must be a Python *.py file and the module will be put into the
        archive.  Added modules are always module.pyo or module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        """
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print "Adding package in", pathname, "as", basename
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print "Adding", arcname
                self.write(fname, arcname)
                dirlist = os.listdir(pathname)
                # __init__.py was just written above; do not add it twice.
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename)  # Recursive call
                    elif ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                         basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print "Adding files from directory", pathname
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                         basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
        else:
            if pathname[-3:] != ".py":
                raise RuntimeError, \
                      'Files added with writepy() must end with ".py"'
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print "Adding file", arcname
            self.write(fname, arcname)
    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary.  For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        if os.path.isfile(file_pyo) and \
                os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
            # Prefer an optimized .pyo at least as new as the source.
            fname = file_pyo        # Use .pyo file
        elif not os.path.isfile(file_pyc) or \
                os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
            # Missing or stale .pyc: compile the source now.
            import py_compile
            if self.debug:
                print "Compiling", file_py
            try:
                py_compile.compile(file_py, file_pyc, None, True)
            except py_compile.PyCompileError,err:
                # Report the failure but still fall through: the (possibly
                # stale or absent) .pyc name is returned regardless.
                print err.msg
            fname = file_pyc
        else:
            fname = file_pyc
        archivename = os.path.split(fname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
def main(args = None):
import textwrap
USAGE=textwrap.dedent("""\
Usage:
zipfile.py -l zipfile.zip # Show listing of a zipfile
zipfile.py -t zipfile.zip # Test if a zipfile is valid
zipfile.py -e zipfile.zip target # Extract zipfile into target dir
zipfile.py -c zipfile.zip src ... # Create zipfile from sources
""")
if args is None:
args = sys.argv[1:]
if not args or args[0] not in ('-l', '-c', '-e', '-t'):
print USAGE
sys.exit(1)
if args[0] == '-l':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
zf.printdir()
zf.close()
elif args[0] == '-t':
if len(args) != 2:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
badfile = zf.testzip()
if badfile:
print("The following enclosed file is corrupted: {!r}".format(badfile))
print "Done testing"
elif args[0] == '-e':
if len(args) != 3:
print USAGE
sys.exit(1)
zf = ZipFile(args[1], 'r')
out = args[2]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
with open(tgt, 'wb') as fp:
fp.write(zf.read(path))
zf.close()
elif args[0] == '-c':
if len(args) < 3:
print USAGE
sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf,
os.path.join(path, nm), os.path.join(zippath, nm))
# else: ignore
zf = ZipFile(args[1], 'w', allowZip64=True)
for src in args[2:]:
addToZip(zf, src, os.path.basename(src))
zf.close()
if __name__ == "__main__":
main()
| gpl-2.0 |
halvertoluke/edx-platform | common/test/acceptance/tests/studio/test_studio_outline.py | 27 | 81678 | """
Acceptance tests for studio related to the outline page.
"""
import json
from datetime import datetime, timedelta
import itertools
from pytz import UTC
from bok_choy.promise import EmptyPromise
from nose.plugins.attrib import attr
from ...pages.studio.settings_advanced import AdvancedSettingsPage
from ...pages.studio.overview import CourseOutlinePage, ContainerPage, ExpandCollapseLinkState
from ...pages.studio.utils import add_discussion, drag, verify_ordering
from ...pages.lms.courseware import CoursewarePage
from ...pages.lms.course_nav import CourseNavPage
from ...pages.lms.staff_view import StaffPage
from ...fixtures.config import ConfigModelFixture
from ...fixtures.course import XBlockFixtureDesc
from base_studio_test import StudioCourseTest
from ..helpers import load_data_str
from ...pages.lms.progress import ProgressPage
SECTION_NAME = 'Test Section'
SUBSECTION_NAME = 'Test Subsection'
UNIT_NAME = 'Test Unit'
class CourseOutlineTest(StudioCourseTest):
    """
    Base class for all course outline tests
    """

    def setUp(self):
        """
        Install a course with no content using a fixture.
        """
        super(CourseOutlineTest, self).setUp()
        # Page objects for the Studio outline and advanced settings pages
        # of the freshly installed course.
        self.course_outline_page = CourseOutlinePage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )
        self.advanced_settings = AdvancedSettingsPage(
            self.browser, self.course_info['org'], self.course_info['number'], self.course_info['run']
        )

    def populate_course_fixture(self, course_fixture):
        """ Install a course with sections/problems, tabs, updates, and handouts """
        # One section -> one subsection -> one unit holding three components.
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME).add_children(
                        XBlockFixtureDesc('problem', 'Test Problem 1', data=load_data_str('multiple_choice.xml')),
                        XBlockFixtureDesc('html', 'Test HTML Component'),
                        XBlockFixtureDesc('discussion', 'Test Discussion Component')
                    )
                )
            )
        )

    def do_action_and_verify(self, outline_page, action, expected_ordering):
        """
        Perform the supplied action and then verify the resulting ordering.

        'action' is a callable taking the outline page; 'outline_page' may be
        None, in which case the outline is visited fresh first.
        """
        if outline_page is None:
            outline_page = self.course_outline_page.visit()
        action(outline_page)
        verify_ordering(self, outline_page, expected_ordering)

        # Reload the page and expand all subsections to see that the change was persisted.
        outline_page = self.course_outline_page.visit()
        outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').click()
        verify_ordering(self, outline_page, expected_ordering)
@attr('shard_3')
class CourseOutlineDragAndDropTest(CourseOutlineTest):
    """
    Tests of drag and drop within the outline page.
    """
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Create a course with one section, two subsections, and four units
        """
        # Drag-handle indices depend on how much of the outline is expanded
        # when the drag starts:
        # with collapsed outline
        self.chap_1_handle = 0
        self.chap_1_seq_1_handle = 1
        # with first sequential expanded
        self.seq_1_vert_1_handle = 2
        self.seq_1_vert_2_handle = 3
        self.chap_1_seq_2_handle = 4
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', "1").add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )

    def drag_and_verify(self, source, target, expected_ordering, outline_page=None):
        # Drag the element at handle index 'source' onto 'target', then
        # verify the outline ordering (and re-verify after a page reload).
        self.do_action_and_verify(
            outline_page,
            lambda (outline): drag(outline, source, target),
            expected_ordering
        )

    def test_drop_unit_in_collapsed_subsection(self):
        """
        Drag vertical "1.1.2" from subsection "1.1" into collapsed subsection "1.2" which already
        have its own verticals.
        """
        course_outline_page = self.course_outline_page.visit()
        # expand first subsection
        course_outline_page.q(css='.outline-item.outline-subsection.is-collapsed .ui-toggle-expansion').first.click()

        expected_ordering = [{"1": ["1.1", "1.2"]},
                             {"1.1": ["1.1.1"]},
                             {"1.2": ["1.1.2", "1.2.1", "1.2.2"]}]
        self.drag_and_verify(self.seq_1_vert_2_handle, self.chap_1_seq_2_handle, expected_ordering, course_outline_page)
@attr('shard_3')
class WarningMessagesTest(CourseOutlineTest):
    """
    Feature: Warning messages on sections, subsections, and units
    """
    __test__ = True

    # Exact warning strings surfaced by the Studio outline UI.
    STAFF_ONLY_WARNING = 'Contains staff only content'
    LIVE_UNPUBLISHED_WARNING = 'Unpublished changes to live content'
    FUTURE_UNPUBLISHED_WARNING = 'Unpublished changes to content that will release in the future'
    NEVER_PUBLISHED_WARNING = 'Unpublished units will not be released'

    class PublishState(object):
        """
        Default values for representing the published state of a unit
        """
        NEVER_PUBLISHED = 1
        UNPUBLISHED_CHANGES = 2
        PUBLISHED = 3
        # All states, in order, for iterating the test grid.
        VALUES = [NEVER_PUBLISHED, UNPUBLISHED_CHANGES, PUBLISHED]
    class UnitState(object):
        """ Represents the state of a unit """

        def __init__(self, is_released, publish_state, is_locked):
            """ Creates a new UnitState with the given properties """
            self.is_released = is_released      # start date in the past?
            self.publish_state = publish_state  # one of PublishState.VALUES
            self.is_locked = is_locked          # visible to staff only?

        @property
        def name(self):
            """ Returns an appropriate name based on the properties of the unit """
            # The name doubles as the section/subsection/unit display name,
            # so each grid point gets a unique, human-readable label.
            result = "Released " if self.is_released else "Unreleased "
            if self.publish_state == WarningMessagesTest.PublishState.NEVER_PUBLISHED:
                result += "Never Published "
            elif self.publish_state == WarningMessagesTest.PublishState.UNPUBLISHED_CHANGES:
                result += "Unpublished Changes "
            else:
                result += "Published "
            result += "Locked" if self.is_locked else "Unlocked"
            return result
    def populate_course_fixture(self, course_fixture):
        """ Install a course with various configurations that could produce warning messages """

        # Define the dimensions that map to the UnitState constructor
        features = [
            [True, False],            # Possible values for is_released
            self.PublishState.VALUES, # Possible values for publish_state
            [True, False]             # Possible values for is_locked
        ]

        # Add a fixture for every state in the product of features
        course_fixture.add_children(*[
            self._build_fixture(self.UnitState(*state)) for state in itertools.product(*features)
        ])

    def _build_fixture(self, unit_state):
        """ Returns an XBlockFixtureDesc with a section, subsection, and possibly unit that has the given state. """
        name = unit_state.name
        # Released content starts far in the past; unreleased content starts
        # one day in the future.
        start = (datetime(1984, 3, 4) if unit_state.is_released else datetime.now(UTC) + timedelta(1)).isoformat()

        subsection = XBlockFixtureDesc('sequential', name, metadata={'start': start})

        # Children of never published subsections will be added on demand via _ensure_unit_present
        return XBlockFixtureDesc('chapter', name).add_children(
            subsection if unit_state.publish_state == self.PublishState.NEVER_PUBLISHED
            else subsection.add_children(
                XBlockFixtureDesc('vertical', name, metadata={
                    'visible_to_staff_only': True if unit_state.is_locked else None
                })
            )
        )
    # Each test below checks one point of the
    # (released x publish-state x locked) grid against the warning text
    # Studio is expected to display.

    def test_released_never_published_locked(self):
        """ Tests that released never published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_released_never_published_unlocked(self):
        """ Tests that released never published unlocked units display 'Unpublished units will not be released' """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
            self.NEVER_PUBLISHED_WARNING
        )

    def test_released_unpublished_changes_locked(self):
        """ Tests that released unpublished changes locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_released_unpublished_changes_unlocked(self):
        """ Tests that released unpublished changes unlocked units display 'Unpublished changes to live content' """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
            self.LIVE_UNPUBLISHED_WARNING
        )

    def test_released_published_locked(self):
        """ Tests that released published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_released_published_unlocked(self):
        """ Tests that released published unlocked units display no warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=True, publish_state=self.PublishState.PUBLISHED, is_locked=False),
            None
        )

    def test_unreleased_never_published_locked(self):
        """ Tests that unreleased never published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_unreleased_never_published_unlocked(self):
        """ Tests that unreleased never published unlocked units display 'Unpublished units will not be released' """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.NEVER_PUBLISHED, is_locked=False),
            self.NEVER_PUBLISHED_WARNING
        )

    def test_unreleased_unpublished_changes_locked(self):
        """ Tests that unreleased unpublished changes locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_unreleased_unpublished_changes_unlocked(self):
        """
        Tests that unreleased unpublished changes unlocked units display 'Unpublished changes to content that will
        release in the future'
        """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.UNPUBLISHED_CHANGES, is_locked=False),
            self.FUTURE_UNPUBLISHED_WARNING
        )

    def test_unreleased_published_locked(self):
        """ Tests that unreleased published locked units display staff only warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=True),
            self.STAFF_ONLY_WARNING
        )

    def test_unreleased_published_unlocked(self):
        """ Tests that unreleased published unlocked units display no warnings """
        self._verify_unit_warning(
            self.UnitState(is_released=False, publish_state=self.PublishState.PUBLISHED, is_locked=False),
            None
        )
    def _verify_unit_warning(self, unit_state, expected_status_message):
        """
        Verifies that the given unit's messages match the expected messages.

        Arguments:
            unit_state: a UnitState describing the released/publish/lock state the
                unit should be put into before checking.
            expected_status_message: the status message the unit is expected to
                display, or None if the unit status message is expected to not be
                present. STAFF_ONLY_WARNING is special-cased: it is asserted on the
                section and subsection as well as the unit, because the staff-only
                state is surfaced on the unit's ancestors too.
        """
        self._ensure_unit_present(unit_state)
        self.course_outline_page.visit()
        # The section created for this state is named after the unit state itself.
        section = self.course_outline_page.section(unit_state.name)
        subsection = section.subsection_at(0)
        # Units are only rendered once the subsection is expanded.
        subsection.expand_subsection()
        unit = subsection.unit_at(0)
        if expected_status_message == self.STAFF_ONLY_WARNING:
            # Staff-only warnings are shown at every level of the outline.
            self.assertEqual(section.status_message, self.STAFF_ONLY_WARNING)
            self.assertEqual(subsection.status_message, self.STAFF_ONLY_WARNING)
            self.assertEqual(unit.status_message, self.STAFF_ONLY_WARNING)
        else:
            # All other unit-level warnings do not propagate to the ancestors.
            self.assertFalse(section.has_status_message)
            self.assertFalse(subsection.has_status_message)
            if expected_status_message:
                self.assertEqual(unit.status_message, expected_status_message)
            else:
                self.assertFalse(unit.has_status_message)
    def _ensure_unit_present(self, unit_state):
        """
        Ensures that a unit with the given state is present on the course outline.

        The fixture is assumed to have created the unit in the PUBLISHED state, so
        nothing needs to be done for that case. For UNPUBLISHED_CHANGES a draft is
        created by editing the published unit; for NEVER_PUBLISHED a brand new unit
        is added. In both of those cases the staff lock is then toggled if it does
        not already match unit_state.is_locked.
        """
        if unit_state.publish_state == self.PublishState.PUBLISHED:
            # The fixture already published the unit; nothing to do.
            return
        name = unit_state.name
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(name).subsection(name)
        subsection.expand_subsection()
        if unit_state.publish_state == self.PublishState.UNPUBLISHED_CHANGES:
            # Editing the published unit (adding a discussion) creates a draft.
            unit = subsection.unit(name).go_to()
            add_discussion(unit)
        elif unit_state.publish_state == self.PublishState.NEVER_PUBLISHED:
            # Adding a unit redirects the browser to the new (never published) unit.
            subsection.add_unit()
            unit = ContainerPage(self.browser, None)
            unit.wait_for_page()
        # Only toggle the lock when the current state differs from the desired one.
        if unit.is_staff_locked != unit_state.is_locked:
            unit.toggle_staff_lock()
@attr('shard_3')
class EditingSectionsTest(CourseOutlineTest):
    """
    Feature: Editing Release date, Due date and grading type.

    Exercises the settings modal opened from the configuration icon of a section
    or subsection on the course outline page.
    """
    __test__ = True
    def test_can_edit_subsection(self):
        """
        Scenario: I can edit settings of subsection.
            Given that I have created a subsection
            Then I see release date, due date and grading policy of subsection in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date, due date and grading policy fields present
            And they have correct initial values
            Then I set new values for these fields
            And I click save button on the modal
            Then I see release date, due date and grading policy of subsection in course outline
        """
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        # Verify that Release date visible by default
        self.assertTrue(subsection.release_date)
        # Verify that Due date and Policy hidden by default
        self.assertFalse(subsection.due_date)
        self.assertFalse(subsection.policy)
        modal = subsection.edit()
        # Verify fields
        self.assertTrue(modal.has_release_date())
        self.assertTrue(modal.has_release_time())
        self.assertTrue(modal.has_due_date())
        self.assertTrue(modal.has_due_time())
        self.assertTrue(modal.has_policy())
        # Verify initial values (epoch release date, no due date, not graded)
        self.assertEqual(modal.release_date, u'1/1/1970')
        self.assertEqual(modal.release_time, u'00:00')
        self.assertEqual(modal.due_date, u'')
        self.assertEqual(modal.due_time, u'')
        self.assertEqual(modal.policy, u'Not Graded')
        # Set new values
        modal.release_date = '3/12/1972'
        modal.release_time = '04:01'
        modal.due_date = '7/21/2014'
        modal.due_time = '23:39'
        modal.policy = 'Lab'
        modal.save()
        # The outline re-renders the saved values in a human-readable format.
        self.assertIn(u'Released: Mar 12, 1972', subsection.release_date)
        self.assertIn(u'04:01', subsection.release_date)
        self.assertIn(u'Due: Jul 21, 2014', subsection.due_date)
        self.assertIn(u'23:39', subsection.due_date)
        self.assertIn(u'Lab', subsection.policy)
    def test_can_edit_section(self):
        """
        Scenario: I can edit settings of section.
            Given that I have created a section
            Then I see release date of section in course outline
            When I click on the configuration icon
            Then edit modal window is shown
            And release date field present
            And it has correct initial value
            Then I set new value for this field
            And I click save button on the modal
            Then I see release date of section in course outline
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        # Verify that Release date visible by default
        self.assertTrue(section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)
        modal = section.edit()
        # Verify fields (sections have a release date only; no due date or policy)
        self.assertTrue(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        self.assertFalse(modal.has_policy())
        # Verify initial value
        self.assertEqual(modal.release_date, u'1/1/1970')
        # Set new value
        modal.release_date = '5/14/1969'
        modal.save()
        self.assertIn(u'Released: May 14, 1969', section.release_date)
        # Verify that Due date and Policy are not present
        self.assertFalse(section.due_date)
        self.assertFalse(section.policy)
    def test_subsection_is_graded_in_lms(self):
        """
        Scenario: I can grade subsection from course outline page.
            Given I visit progress page
            And I see that problem in subsection has grading type "Practice"
            Then I visit course outline page
            And I click on the configuration icon of subsection
            And I set grading policy to "Lab"
            And I click save button on the modal
            Then I visit progress page
            And I see that problem in subsection has grading type "Problem"
        """
        progress_page = ProgressPage(self.browser, self.course_id)
        progress_page.visit()
        progress_page.wait_for_page()
        self.assertEqual(u'Practice', progress_page.grading_formats[0])
        self.course_outline_page.visit()
        subsection = self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        # Set new values
        modal.policy = 'Lab'
        modal.save()
        # The LMS progress page should now reflect the updated grading policy.
        progress_page.visit()
        self.assertEqual(u'Problem', progress_page.grading_formats[0])
    def test_unchanged_release_date_is_not_saved(self):
        """
        Scenario: Saving a subsection without changing the release date will not override the release date
            Given that I have created a section with a subsection
            When I open the settings modal for the subsection
            And I pressed save
            And I open the settings modal for the section
            And I change the release date to 07/20/1969
            And I press save
            Then the subsection and the section have the release date 07/20/1969
        """
        self.course_outline_page.visit()
        # Save the subsection modal without touching anything; this must not pin
        # the subsection to an explicit release date of its own.
        modal = self.course_outline_page.section_at(0).subsection_at(0).edit()
        modal.save()
        modal = self.course_outline_page.section_at(0).edit()
        modal.release_date = '7/20/1969'
        modal.save()
        release_text = 'Released: Jul 20, 1969'
        # The subsection should have inherited the section's new release date.
        self.assertIn(release_text, self.course_outline_page.section_at(0).release_date)
        self.assertIn(release_text, self.course_outline_page.section_at(0).subsection_at(0).release_date)
@attr('shard_3')
class StaffLockTest(CourseOutlineTest):
    """
    Feature: Sections, subsections, and units can be locked and unlocked from the course outline.

    Covers explicit staff locks, inherited locks, the warnings shown on the
    outline, and the visibility of locked content in the LMS.
    """
    __test__ = True
    def populate_course_fixture(self, course_fixture):
        """ Create a course with one section, two subsections, and four units """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', '1').add_children(
                XBlockFixtureDesc('sequential', '1.1').add_children(
                    XBlockFixtureDesc('vertical', '1.1.1'),
                    XBlockFixtureDesc('vertical', '1.1.2')
                ),
                XBlockFixtureDesc('sequential', '1.2').add_children(
                    XBlockFixtureDesc('vertical', '1.2.1'),
                    XBlockFixtureDesc('vertical', '1.2.2')
                )
            )
        )
    def _verify_descendants_are_staff_only(self, item):
        """Verifies that all the descendants of item are staff only"""
        self.assertTrue(item.is_staff_only)
        # Units have no children() accessor, hence the hasattr guard.
        if hasattr(item, 'children'):
            for child in item.children():
                self._verify_descendants_are_staff_only(child)
    def _remove_staff_lock_and_verify_warning(self, outline_item, expect_warning):
        """Removes staff lock from a course outline item and checks whether or not a warning appears."""
        modal = outline_item.edit()
        modal.is_explicitly_locked = False
        # The modal warns only when unlocking would make the item visible to students.
        if expect_warning:
            self.assertTrue(modal.shows_staff_lock_warning())
        else:
            self.assertFalse(modal.shows_staff_lock_warning())
        modal.save()
    def _toggle_lock_on_unlocked_item(self, outline_item):
        """Toggles outline_item's staff lock on and then off, verifying the staff lock warning"""
        self.assertFalse(outline_item.has_staff_lock_warning)
        outline_item.set_staff_lock(True)
        self.assertTrue(outline_item.has_staff_lock_warning)
        # Locking a parent implicitly locks everything beneath it.
        self._verify_descendants_are_staff_only(outline_item)
        outline_item.set_staff_lock(False)
        self.assertFalse(outline_item.has_staff_lock_warning)
    def _verify_explicit_staff_lock_remains_after_unlocking_parent(self, child_item, parent_item):
        """Verifies that child_item's explicit staff lock remains after removing parent_item's staff lock"""
        child_item.set_staff_lock(True)
        parent_item.set_staff_lock(True)
        self.assertTrue(parent_item.has_staff_lock_warning)
        self.assertTrue(child_item.has_staff_lock_warning)
        parent_item.set_staff_lock(False)
        self.assertFalse(parent_item.has_staff_lock_warning)
        # The child's own explicit lock is independent of the inherited one.
        self.assertTrue(child_item.has_staff_lock_warning)
    def test_units_can_be_locked(self):
        """
        Scenario: Units can be locked and unlocked from the course outline page
            Given I have a course with a unit
            When I click on the configuration icon
            And I enable explicit staff locking
            And I click save
            Then the unit shows a staff lock warning
            And when I click on the configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        self._toggle_lock_on_unlocked_item(unit)
    def test_subsections_can_be_locked(self):
        """
        Scenario: Subsections can be locked and unlocked from the course outline page
            Given I have a course with a subsection
            When I click on the subsection's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the subsection shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        self._toggle_lock_on_unlocked_item(subsection)
    def test_sections_can_be_locked(self):
        """
        Scenario: Sections can be locked and unlocked from the course outline page
            Given I have a course with a section
            When I click on the section's configuration icon
            And I enable explicit staff locking
            And I click save
            Then the section shows a staff lock warning
            And all its descendants are staff locked
            And when I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        self._toggle_lock_on_unlocked_item(section)
    def test_explicit_staff_lock_remains_after_unlocking_section(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a section
            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a section and one of its units
            When I click on the section's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        unit = section.subsection_at(0).unit_at(0)
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, section)
    def test_explicit_staff_lock_remains_after_unlocking_subsection(self):
        """
        Scenario: An explicitly locked unit is still locked after removing an inherited lock from a subsection
            Given I have a course with sections, subsections, and units
            And I have enabled explicit staff lock on a subsection and one of its units
            When I click on the subsection's configuration icon
            And I disable explicit staff locking
            And I click save
            Then the unit still shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        self._verify_explicit_staff_lock_remains_after_unlocking_parent(unit, subsection)
    def test_section_displays_lock_when_all_subsections_locked(self):
        """
        Scenario: All subsections in section are explicitly locked, section should display staff only warning
            Given I have a course one section and two subsections
            When I enable explicit staff lock on all the subsections
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        section.subsection_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)
    def test_section_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in a section are explicitly locked, section should display staff only warning
            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on all the units
            Then the section shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(0).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertTrue(section.has_staff_lock_warning)
    def test_subsection_displays_lock_when_all_units_locked(self):
        """
        Scenario: All units in subsection are explicitly locked, subsection should display staff only warning
            Given I have a course with one subsection and two units
            When I enable explicit staff lock on all the units
            Then the subsection shows a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        subsection.unit_at(1).set_staff_lock(True)
        self.assertTrue(subsection.has_staff_lock_warning)
    def test_section_does_not_display_lock_when_some_subsections_locked(self):
        """
        Scenario: Only some subsections in section are explicitly locked, section should NOT display staff only warning
            Given I have a course with one section and two subsections
            When I enable explicit staff lock on one subsection
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section_at(0)
        section.subsection_at(0).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)
    def test_section_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in section are explicitly locked, section should NOT display staff only warning
            Given I have a course with one section, two subsections, and four units
            When I enable explicit staff lock on three units
            Then the section does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        # Lock three of the four units; the fourth stays visible to students.
        section.subsection_at(0).unit_at(0).set_staff_lock(True)
        section.subsection_at(0).unit_at(1).set_staff_lock(True)
        section.subsection_at(1).unit_at(1).set_staff_lock(True)
        self.assertFalse(section.has_staff_lock_warning)
    def test_subsection_does_not_display_lock_when_some_units_locked(self):
        """
        Scenario: Only some units in subsection are explicitly locked, subsection should NOT display staff only warning
            Given I have a course with one subsection and two units
            When I enable explicit staff lock on one unit
            Then the subsection does not show a staff lock warning
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.unit_at(0).set_staff_lock(True)
        self.assertFalse(subsection.has_staff_lock_warning)
    def test_locked_sections_do_not_appear_in_lms(self):
        """
        Scenario: A locked section is not visible to students in the LMS
            Given I have a course with two sections
            When I enable explicit staff lock on one section
            And I click the View Live button to switch to staff view
            Then I see two sections in the sidebar
            And when I switch the view mode to student view
            Then I see one section in the sidebar
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.course_outline_page.section_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # Staff see everything; students must not see the locked section.
        self.assertEqual(courseware.num_sections, 2)
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_sections, 1)
    def test_locked_subsections_do_not_appear_in_lms(self):
        """
        Scenario: A locked subsection is not visible to students in the LMS
            Given I have a course with two subsections
            When I enable explicit staff lock on one subsection
            And I click the View Live button to switch to staff view
            Then I see two subsections in the sidebar
            And when I switch the view mode to student view
            Then I see one section in the sidebar
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).subsection_at(1).set_staff_lock(True)
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # Staff see everything; students must not see the locked subsection.
        self.assertEqual(courseware.num_subsections, 2)
        StaffPage(self.browser, self.course_id).set_staff_view_mode('Student')
        self.assertEqual(courseware.num_subsections, 1)
    def test_toggling_staff_lock_on_section_does_not_publish_draft_units(self):
        """
        Scenario: Locking and unlocking a section will not publish its draft units
            Given I have a course with a section and unit
            And the unit has a draft and published version
            When I enable explicit staff lock on the section
            And I disable explicit staff lock on the section
            And I click the View Live button to switch to staff view
            Then I see the published version of the unit
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        # Create a draft by editing the published unit.
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        add_discussion(unit)
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        section.set_staff_lock(True)
        section.set_staff_lock(False)
        unit = section.subsection_at(0).unit_at(0).go_to()
        unit.view_published_version()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The published version must still be empty; the draft was not published.
        self.assertEqual(courseware.num_xblock_components, 0)
    def test_toggling_staff_lock_on_subsection_does_not_publish_draft_units(self):
        """
        Scenario: Locking and unlocking a subsection will not publish its draft units
            Given I have a course with a subsection and unit
            And the unit has a draft and published version
            When I enable explicit staff lock on the subsection
            And I disable explicit staff lock on the subsection
            And I click the View Live button to switch to staff view
            Then I see the published version of the unit
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        # Create a draft by editing the published unit.
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).go_to()
        add_discussion(unit)
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.set_staff_lock(True)
        subsection.set_staff_lock(False)
        unit = subsection.unit_at(0).go_to()
        unit.view_published_version()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The published version must still be empty; the draft was not published.
        self.assertEqual(courseware.num_xblock_components, 0)
    def test_removing_staff_lock_from_unit_without_inherited_lock_shows_warning(self):
        """
        Scenario: Removing explicit staff lock from a unit which does not inherit staff lock displays a warning.
            Given I have a course with a subsection and unit
            When I enable explicit staff lock on the unit
            And I disable explicit staff lock on the unit
            Then I see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        unit = self.course_outline_page.section_at(0).subsection_at(0).unit_at(0)
        unit.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(unit, True)
    def test_removing_staff_lock_from_subsection_without_inherited_lock_shows_warning(self):
        """
        Scenario: Removing explicit staff lock from a subsection which does not inherit staff lock displays a warning.
            Given I have a course with a section and subsection
            When I enable explicit staff lock on the subsection
            And I disable explicit staff lock on the subsection
            Then I see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        subsection.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(subsection, True)
    def test_removing_staff_lock_from_unit_with_inherited_lock_shows_no_warning(self):
        """
        Scenario: Removing explicit staff lock from a unit which also inherits staff lock displays no warning.
            Given I have a course with a subsection and unit
            When I enable explicit staff lock on the subsection
            And I enable explicit staff lock on the unit
            When I disable explicit staff lock on the unit
            Then I do not see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        subsection = self.course_outline_page.section_at(0).subsection_at(0)
        unit = subsection.unit_at(0)
        subsection.set_staff_lock(True)
        unit.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(unit, False)
    def test_removing_staff_lock_from_subsection_with_inherited_lock_shows_no_warning(self):
        """
        Scenario: Removing explicit staff lock from a subsection which also inherits staff lock displays no warning.
            Given I have a course with a section and subsection
            When I enable explicit staff lock on the section
            And I enable explicit staff lock on the subsection
            When I disable explicit staff lock on the subsection
            Then I do not see a modal warning.
        """
        self.course_outline_page.visit()
        self.course_outline_page.expand_all_subsections()
        section = self.course_outline_page.section_at(0)
        subsection = section.subsection_at(0)
        section.set_staff_lock(True)
        subsection.set_staff_lock(True)
        self._remove_staff_lock_and_verify_warning(subsection, False)
@attr('shard_3')
class EditNamesTest(CourseOutlineTest):
    """
    Feature: Click-to-edit section/subsection names

    Verifies inline renaming on the course outline: entering edit mode, saving
    on blur, ignoring empty names, and preserving the expand/collapse state.
    """
    __test__ = True
    def set_name_and_verify(self, item, old_name, new_name, expected_name):
        """
        Changes the display name of item from old_name to new_name, then verifies that its value is expected_name.

        expected_name differs from new_name when the rename is expected to be
        rejected (e.g. an empty name leaves the old name in place).
        """
        self.assertEqual(item.name, old_name)
        item.change_name(new_name)
        # The field must have left its editable state after the change.
        self.assertFalse(item.in_editable_form())
        self.assertEqual(item.name, expected_name)
    def test_edit_section_name(self):
        """
        Scenario: Click-to-edit section name
            Given that I have created a section
            When I click on the name of section
            Then the section name becomes editable
            And given that I have edited the section name
            When I click outside of the edited section name
            Then the section name saves
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0),
            'Test Section',
            'Changed',
            'Changed'
        )
    def test_edit_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name
            Given that I have created a subsection
            When I click on the name of subsection
            Then the subsection name becomes editable
            And given that I have edited the subsection name
            When I click outside of the edited subsection name
            Then the subsection name saves
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0).subsection_at(0),
            'Test Subsection',
            'Changed',
            'Changed'
        )
    def test_edit_empty_section_name(self):
        """
        Scenario: Click-to-edit section name, enter empty name
            Given that I have created a section
            And I have clicked to edit the name of the section
            And I have entered an empty section name
            When I click outside of the edited section name
            Then the section name does not change
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0),
            'Test Section',
            '',
            'Test Section'
        )
    def test_edit_empty_subsection_name(self):
        """
        Scenario: Click-to-edit subsection name, enter empty name
            Given that I have created a subsection
            And I have clicked to edit the name of the subsection
            And I have entered an empty subsection name
            When I click outside of the edited subsection name
            Then the subsection name does not change
            And becomes non-editable
        """
        self.course_outline_page.visit()
        self.set_name_and_verify(
            self.course_outline_page.section_at(0).subsection_at(0),
            'Test Subsection',
            '',
            'Test Subsection'
        )
    def test_editing_names_does_not_expand_collapse(self):
        """
        Scenario: A section stays in the same expand/collapse state while its name is edited
            Given that I have created a section
            And the section is collapsed
            When I click on the name of the section
            Then the section is collapsed
            And given that I have entered a new name
            Then the section is collapsed
            And given that I press ENTER to finalize the name
            Then the section is collapsed
        """
        self.course_outline_page.visit()
        # Collapse the section (toggles state; the fixture renders it expanded).
        self.course_outline_page.section_at(0).expand_subsection()
        self.assertFalse(self.course_outline_page.section_at(0).in_editable_form())
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        # The section must stay collapsed through every step of the rename.
        self.course_outline_page.section_at(0).edit_name()
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).enter_name('Changed')
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
        self.course_outline_page.section_at(0).finalize_name()
        self.assertTrue(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class CreateSectionsTest(CourseOutlineTest):
    """
    Feature: Create new sections/subsections/units

    Starts from an empty course so each creation path can be verified from a
    clean outline.
    """
    __test__ = True
    def populate_course_fixture(self, course_fixture):
        """ Start with a completely empty course to easily test adding things to it """
        pass
    def test_create_new_section_from_top_button(self):
        """
        Scenario: Create new section from button at top of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the top of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        # New sections open with their name ready for inline editing.
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
    def test_create_new_section_from_bottom_button(self):
        """
        Scenario: Create new section from button at bottom of page
            Given that I am on the course outline
            When I click the "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_bottom_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
    def test_create_new_section_from_bottom_button_plus_icon(self):
        """
        Scenario: Create new section from button plus icon at bottom of page
            Given that I am on the course outline
            When I click the plus icon in "+ Add section" button at the bottom of the page
            Then I see a new section added to the bottom of the page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        # click_child_icon targets the icon element inside the button itself.
        self.course_outline_page.add_section_from_bottom_button(click_child_icon=True)
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.assertTrue(self.course_outline_page.section_at(0).in_editable_form())
    def test_create_new_subsection(self):
        """
        Scenario: Create new subsection
            Given that I have created a section
            When I click the "+ Add subsection" button in that section
            Then I see a new subsection added to the bottom of the section
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).add_subsection()
        subsections = self.course_outline_page.section_at(0).subsections()
        self.assertEqual(len(subsections), 1)
        self.assertTrue(subsections[0].in_editable_form())
    def test_create_new_unit(self):
        """
        Scenario: Create new unit
            Given that I have created a section
            And that I have created a subsection within that section
            When I click the "+ Add unit" button in that subsection
            Then I am redirected to a New Unit page
            And the display name is in its editable form.
        """
        self.course_outline_page.visit()
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        self.course_outline_page.section_at(0).add_subsection()
        self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
        self.course_outline_page.section_at(0).subsection_at(0).add_unit()
        # Adding a unit navigates the browser to the new unit's container page.
        unit_page = ContainerPage(self.browser, None)
        EmptyPromise(unit_page.is_browser_on_page, 'Browser is on the unit page').fulfill()
        self.assertTrue(unit_page.is_inline_editing_display_name())
@attr('shard_3')
class DeleteContentTest(CourseOutlineTest):
"""
Feature: Deleting sections/subsections/units
"""
__test__ = True
    def test_delete_section(self):
        """
        Scenario: Delete section
            Given that I am on the course outline
            When I click the delete button for a section on the course outline
            Then I should receive a confirmation message, asking me if I really want to delete the section
            When I click "Yes, I want to delete this component"
            Then the confirmation message should close
            And the section should immediately be deleted from the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        # delete() confirms the prompt by default, removing the section.
        self.course_outline_page.section_at(0).delete()
        self.assertEqual(len(self.course_outline_page.sections()), 0)
    def test_cancel_delete_section(self):
        """
        Scenario: Cancel delete of section
            Given that I clicked the delete button for a section on the course outline
            And I received a confirmation message, asking me if I really want to delete the component
            When I click "Cancel"
            Then the confirmation message should close
            And the section should remain in the course outline
        """
        self.course_outline_page.visit()
        self.assertEqual(len(self.course_outline_page.sections()), 1)
        # cancel=True dismisses the confirmation prompt instead of accepting it.
        self.course_outline_page.section_at(0).delete(cancel=True)
        self.assertEqual(len(self.course_outline_page.sections()), 1)
def test_delete_subsection(self):
"""
Scenario: Delete subsection
Given that I am on the course outline
When I click the delete button for a subsection on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the subsection
When I click "Yes, I want to delete this component"
Then the confiramtion message should close
And the subsection should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 0)
def test_cancel_delete_subsection(self):
"""
Scenario: Cancel delete of subsection
Given that I clicked the delete button for a subsection on the course outline
And I received a confirmation message, asking me if I really want to delete the subsection
When I click "cancel"
Then the confirmation message should close
And the subsection should remain in the course outline
"""
self.course_outline_page.visit()
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
self.course_outline_page.section_at(0).subsection_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsections()), 1)
def test_delete_unit(self):
"""
Scenario: Delete unit
Given that I am on the course outline
When I click the delete button for a unit on the course outline
Then I should receive a confirmation message, asking me if I really want to delete the unit
When I click "Yes, I want to delete this unit"
Then the confirmation message should close
And the unit should immediately be deleted from the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 0)
def test_cancel_delete_unit(self):
"""
Scenario: Cancel delete of unit
Given that I clicked the delete button for a unit on the course outline
And I received a confirmation message, asking me if I really want to delete the unit
When I click "Cancel"
Then the confirmation message should close
And the unit should remain in the course outline
"""
self.course_outline_page.visit()
self.course_outline_page.section_at(0).subsection_at(0).expand_subsection()
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
self.course_outline_page.section_at(0).subsection_at(0).unit_at(0).delete(cancel=True)
self.assertEqual(len(self.course_outline_page.section_at(0).subsection_at(0).units()), 1)
def test_delete_all_no_content_message(self):
"""
Scenario: Delete all sections/subsections/units in a course, "no content" message should appear
Given that I delete all sections, subsections, and units in a course
When I visit the course outline
Then I will see a message that says, "You haven't added any content to this course yet"
Add see a + Add Section button
"""
self.course_outline_page.visit()
self.assertFalse(self.course_outline_page.has_no_content_message)
self.course_outline_page.section_at(0).delete()
self.assertEqual(len(self.course_outline_page.sections()), 0)
self.assertTrue(self.course_outline_page.has_no_content_message)
@attr('shard_3')
class ExpandCollapseMultipleSectionsTest(CourseOutlineTest):
    """
    Feature: Courses with multiple sections can expand and collapse all sections.

    Uses the deprecated ``assertEquals`` nowhere: all assertions use the
    canonical ``assertEqual`` for consistency with the rest of this module.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with a course with two sections """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit')
                )
            ),
            XBlockFixtureDesc('chapter', 'Test Section 2').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 2')
                )
            )
        )

    def verify_all_sections(self, collapsed):
        """
        Verifies that all sections are collapsed if collapsed is True, otherwise all expanded.

        Arguments:
            collapsed (bool): expected collapse state of every section.
        """
        for section in self.course_outline_page.sections():
            self.assertEqual(collapsed, section.is_collapsed)

    def toggle_all_sections(self):
        """
        Toggles the expand/collapse state of all sections.

        NOTE(review): the implementation calls ``expand_subsection()`` on each
        section -- presumably that method acts as a toggle on the element;
        confirm against the page-object implementation.
        """
        for section in self.course_outline_page.sections():
            section.expand_subsection()

    def test_expanded_by_default(self):
        """
        Scenario: The default layout for the outline page is to show sections in expanded view
            Given I have a course with sections
            When I navigate to the course outline page
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with multiple sections
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        for section in self.course_outline_page.sections():
            section.delete()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_collapse_all_when_all_expanded(self):
        """
        Scenario: Collapse all sections when all sections are expanded
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_collapse_all_when_some_expanded(self):
        """
        Scenario: Collapsing all sections when 1 or more sections are already collapsed
            Given I navigate to the outline page of a course with sections
            And all sections are expanded
            When I collapse the first section
            And I click the "Collapse All Sections" link
            Then I see the "Expand All Sections" link
            And all sections are collapsed
        """
        self.course_outline_page.visit()
        self.verify_all_sections(collapsed=False)
        # Collapse just the first section before using the global link.
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.verify_all_sections(collapsed=True)

    def test_expand_all_when_all_collapsed(self):
        """
        Scenario: Expanding all sections when all sections are collapsed
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)

    def test_expand_all_when_some_collapsed(self):
        """
        Scenario: Expanding all sections when 1 or more sections are already expanded
            Given I navigate to the outline page of a course with multiple sections
            And I click the "Collapse All Sections" link
            When I expand the first section
            And I click the "Expand All Sections" link
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.EXPAND)
        # Expand just the first section before using the global link.
        self.course_outline_page.section_at(0).expand_subsection()
        self.course_outline_page.toggle_expand_collapse()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.verify_all_sections(collapsed=False)
@attr('shard_3')
class ExpandCollapseSingleSectionTest(CourseOutlineTest):
    """
    Feature: Courses with a single section can expand and collapse all sections.
    """

    __test__ = True

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Collapse link is removed after last section of a course is deleted
            Given I have a course with one section
            And I navigate to the course outline page
            When I will confirm all alerts
            And I press the "section" delete icon
            Then I do not see the "Collapse All Sections" link
            And I will see a message that says "You haven't added any content to this course yet"
        """
        self.course_outline_page.visit()
        self.course_outline_page.section_at(0).delete()
        # assertEqual replaces the deprecated assertEquals alias used before.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.assertTrue(self.course_outline_page.has_no_content_message)

    def test_old_subsection_stays_collapsed_after_creation(self):
        """
        Scenario: Collapsed subsection stays collapsed after creating a new subsection
            Given I have a course with one section and subsection
            And I navigate to the course outline page
            Then the subsection is collapsed
            And when I create a new subsection
            Then the first subsection is collapsed
            And the second subsection is expanded
        """
        self.course_outline_page.visit()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.course_outline_page.section_at(0).add_subsection()
        self.assertTrue(self.course_outline_page.section_at(0).subsection_at(0).is_collapsed)
        self.assertFalse(self.course_outline_page.section_at(0).subsection_at(1).is_collapsed)
@attr('shard_3')
class ExpandCollapseEmptyTest(CourseOutlineTest):
    """
    Feature: Courses with no sections initially can expand and collapse all sections after addition.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_no_expand_link_for_empty_course(self):
        """
        Scenario: Expand/collapse for a course with no sections
            Given I have a course with no sections
            When I navigate to the course outline page
            Then I do not see the "Collapse All Sections" link
        """
        self.course_outline_page.visit()
        # assertEqual replaces the deprecated assertEquals alias used before.
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)

    def test_link_appears_after_section_creation(self):
        """
        Scenario: Collapse link appears after creating first section of a course
            Given I have a course with no sections
            When I navigate to the course outline page
            And I add a section
            Then I see the "Collapse All Sections" link
            And all sections are expanded
        """
        self.course_outline_page.visit()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.MISSING)
        self.course_outline_page.add_section_from_top_button()
        self.assertEqual(self.course_outline_page.expand_collapse_link_state, ExpandCollapseLinkState.COLLAPSE)
        self.assertFalse(self.course_outline_page.section_at(0).is_collapsed)
@attr('shard_3')
class DefaultStatesEmptyTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with an empty course
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """ Start with an empty course """
        pass

    def test_empty_course_message(self):
        """
        Scenario: Empty course state
            Given that I am in a course with no sections, subsections, nor units
            When I visit the course outline
            Then I will see a message that says "You haven't added any content to this course yet"
            And see a + Add Section button
        """
        outline = self.course_outline_page
        outline.visit()
        self.assertTrue(outline.has_no_content_message)
        self.assertTrue(outline.bottom_add_section_button.is_present())
@attr('shard_3')
class DefaultStatesContentTest(CourseOutlineTest):
    """
    Feature: Misc course outline default states/actions when starting with a course with content
    """

    __test__ = True

    def test_view_live(self):
        """
        Scenario: View Live version from course outline
            Given that I am on the course outline
            When I click the "View Live" button
            Then a new tab will open to the course on the LMS
        """
        self.course_outline_page.visit()
        self.course_outline_page.view_live()
        courseware = CoursewarePage(self.browser, self.course_id)
        courseware.wait_for_page()
        # The default fixture creates exactly these components, in order.
        expected_types = ['problem', 'html', 'discussion']
        self.assertEqual(courseware.num_xblock_components, len(expected_types))
        for position, block_type in enumerate(expected_types):
            self.assertEqual(courseware.xblock_component_type(position), block_type)
@attr('shard_3')
class UnitNavigationTest(CourseOutlineTest):
    """
    Feature: Navigate to units
    """

    __test__ = True

    def test_navigate_to_unit(self):
        """
        Scenario: Click unit name to navigate to unit page
            Given that I have expanded a section/subsection so I can see unit names
            When I click on a unit name
            Then I will be taken to the appropriate unit page
        """
        outline = self.course_outline_page
        outline.visit()
        outline.section_at(0).subsection_at(0).expand_subsection()
        unit_page = outline.section_at(0).subsection_at(0).unit_at(0).go_to()
        self.assertTrue(unit_page.is_browser_on_page)
@attr('shard_3')
class PublishSectionTest(CourseOutlineTest):
    """
    Feature: Publish sections.

    Each test adds unpublished HTML content to the fixture, publishes at a
    given level (unit / subsection / section) via the outline page, and then
    verifies both the outline's publish buttons and the published content in
    the LMS courseware view.
    """

    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Sets up a course structure with 2 subsections inside a single section.
        The first subsection has 2 units, and the second subsection has one unit.
        """
        # Page objects are created here because fixture population runs for
        # every test in this class; tests use them after publishing.
        self.courseware = CoursewarePage(self.browser, self.course_id)
        self.course_nav = CourseNavPage(self.browser)
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME),
                    XBlockFixtureDesc('vertical', 'Test Unit 2'),
                ),
                XBlockFixtureDesc('sequential', 'Test Subsection 2').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit 3'),
                ),
            ),
        )

    def test_unit_publishing(self):
        """
        Scenario: Can publish a unit and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the first unit, subsection, section
            When I publish the first unit
            Then I see that publish button for the first unit disappears
            And I see publish buttons for subsection, section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        # All three levels have pending changes, so all show a publish button.
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        unit.publish()
        # Only the published unit's button disappears; ancestors still have
        # unpublished children.
        self.assertFalse(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_subsection_publishing(self):
        """
        Scenario: Can publish a subsection and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the first subsection
            Then I see that publish button for the first subsection disappears
            And I see that publish buttons disappear for the child units of the subsection
            And I see publish button for section
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(unit.publish_action)
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.course_outline_page.section(SECTION_NAME).subsection(SUBSECTION_NAME).publish()
        # Publishing a subsection also publishes its child units; the section
        # still has an unpublished second subsection.
        self.assertFalse(unit.publish_action)
        self.assertFalse(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_nav.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)

    def test_section_publishing(self):
        """
        Scenario: Can publish a section and see published content in LMS
            Given I have a section with 2 subsections and 3 unpublished units
            When I go to the course outline
            Then I see publish button for the unit, subsection, section
            When I publish the section
            Then I see that publish buttons disappear
            And I see the changed content in LMS
        """
        self._add_unpublished_content()
        self.course_outline_page.visit()
        section, subsection, unit = self._get_items()
        self.assertTrue(subsection.publish_action)
        self.assertTrue(section.publish_action)
        self.assertTrue(unit.publish_action)
        self.course_outline_page.section(SECTION_NAME).publish()
        # Publishing the section publishes everything beneath it.
        self.assertFalse(subsection.publish_action)
        self.assertFalse(section.publish_action)
        self.assertFalse(unit.publish_action)
        self.courseware.visit()
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_nav.go_to_sequential_position(2)
        self.assertEqual(1, self.courseware.num_xblock_components)
        self.course_nav.go_to_section(SECTION_NAME, 'Test Subsection 2')
        self.assertEqual(1, self.courseware.num_xblock_components)

    def _add_unpublished_content(self):
        """
        Adds unpublished HTML content to first three units in the course.
        """
        for index in xrange(3):
            self.course_fixture.create_xblock(
                self.course_fixture.get_nested_xblocks(category="vertical")[index].locator,
                XBlockFixtureDesc('html', 'Unpublished HTML Component ' + str(index)),
            )

    def _get_items(self):
        """
        Returns first section, subsection, and unit on the page.

        Returns:
            tuple: (section, subsection, unit) page-object elements.
        """
        section = self.course_outline_page.section(SECTION_NAME)
        subsection = section.subsection(SUBSECTION_NAME)
        # The unit is only reachable once its subsection is expanded.
        unit = subsection.expand_subsection().unit(UNIT_NAME)
        return (section, subsection, unit)
@attr('shard_3')
class DeprecationWarningMessageTest(CourseOutlineTest):
    """
    Feature: Verify deprecation warning message.

    The warning banner on the course outline appears when the course uses
    deprecated block types, either via the "Advanced Module List" advanced
    setting or via existing deprecated components.
    """

    # Expected banner heading.
    HEADING_TEXT = 'This course uses features that are no longer supported.'
    # Expected heading above the list of deprecated components.
    COMPONENT_LIST_HEADING = 'You must delete or replace the following components.'
    # Expected instructions for removing deprecated advanced modules.
    ADVANCE_MODULES_REMOVE_TEXT = ('To avoid errors, edX strongly recommends that you remove unsupported features '
                                   'from the course advanced settings. To do this, go to the Advanced Settings '
                                   'page, locate the "Advanced Module List" setting, and then delete the following '
                                   'modules from the list.')
    # Display name shown for a deprecated component whose display_name is empty.
    DEFAULT_DISPLAYNAME = "Deprecated Component"

    def _add_deprecated_advance_modules(self, block_types):
        """
        Add `block_types` into `Advanced Module List`

        Arguments:
            block_types (list): list of block types
        """
        self.advanced_settings.visit()
        self.advanced_settings.set_values({"Advanced Module List": json.dumps(block_types)})

    def _create_deprecated_components(self):
        """
        Create deprecated components.

        Adds a 'poll' and a 'survey' component under the first vertical.
        """
        parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        self.course_fixture.create_xblock(
            parent_vertical.locator,
            XBlockFixtureDesc('poll', "Poll", data=load_data_str('poll_markdown.xml'))
        )
        self.course_fixture.create_xblock(parent_vertical.locator, XBlockFixtureDesc('survey', 'Survey'))

    def _verify_deprecation_warning_info(
            self,
            deprecated_blocks_present,
            components_present,
            components_display_name_list=None,
            deprecated_modules_list=None
    ):
        """
        Verify deprecation warning

        Arguments:
            deprecated_blocks_present (bool): whether the advance-modules
                remove text and the deprecated modules list should be visible
            components_present (bool): whether the components list should be shown
            components_display_name_list (list): list of components display name
            deprecated_modules_list (list): list of deprecated advance modules
        """
        self.assertTrue(self.course_outline_page.deprecated_warning_visible)
        self.assertEqual(self.course_outline_page.warning_heading_text, self.HEADING_TEXT)
        self.assertEqual(self.course_outline_page.modules_remove_text_shown, deprecated_blocks_present)
        if deprecated_blocks_present:
            self.assertEqual(self.course_outline_page.modules_remove_text, self.ADVANCE_MODULES_REMOVE_TEXT)
            self.assertEqual(self.course_outline_page.deprecated_advance_modules, deprecated_modules_list)
        self.assertEqual(self.course_outline_page.components_visible, components_present)
        if components_present:
            self.assertEqual(self.course_outline_page.components_list_heading, self.COMPONENT_LIST_HEADING)
            self.assertItemsEqual(self.course_outline_page.components_display_names, components_display_name_list)

    def test_no_deprecation_warning_message_present(self):
        """
        Scenario: Verify that the deprecation warning message is not shown if no
            deprecated advance modules are present and no deprecated components
            exist in the course outline.
            When I go to the course outline
            Then I don't see any deprecation warning
        """
        self.course_outline_page.visit()
        self.assertFalse(self.course_outline_page.deprecated_warning_visible)

    def test_deprecation_warning_message_present(self):
        """
        Scenario: Verify deprecation warning message if deprecated modules
            and components are present.
            Given I have "poll" advance modules present in `Advanced Module List`
            And I have created 2 poll components
            When I go to course outline
            Then I see poll deprecated warning
            And I see correct poll deprecated warning heading text
            And I see correct poll deprecated warning advance modules remove text
            And I see list of poll components with correct display names
        """
        self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
        self._create_deprecated_components()
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=True,
            components_present=True,
            components_display_name_list=['Poll', 'Survey'],
            deprecated_modules_list=['poll', 'survey']
        )

    def test_deprecation_warning_with_no_displayname(self):
        """
        Scenario: Verify deprecation warning message if poll components are present.
            Given I have created 1 poll deprecated component
            When I go to course outline
            Then I see poll deprecated warning
            And I see correct poll deprecated warning heading text
            And I see list of poll components with correct message
        """
        parent_vertical = self.course_fixture.get_nested_xblocks(category="vertical")[0]
        # Create a deprecated component with an empty display_name and make sure
        # the deprecation warning lists it under the default display name.
        self.course_fixture.create_xblock(
            parent_vertical.locator,
            XBlockFixtureDesc(category='poll', display_name="", data=load_data_str('poll_markdown.xml'))
        )
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=False,
            components_present=True,
            components_display_name_list=[self.DEFAULT_DISPLAYNAME],
        )

    def test_warning_with_poll_advance_modules_only(self):
        """
        Scenario: Verify that deprecation warning message is shown if only
            poll advance modules are present and no poll component exist.
            Given I have poll advance modules present in `Advanced Module List`
            When I go to course outline
            Then I see poll deprecated warning
            And I see correct poll deprecated warning heading text
            And I see correct poll deprecated warning advance modules remove text
            And I don't see list of poll components
        """
        self._add_deprecated_advance_modules(block_types=['poll', 'survey'])
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=True,
            components_present=False,
            deprecated_modules_list=['poll', 'survey']
        )

    def test_warning_with_poll_components_only(self):
        """
        Scenario: Verify that deprecation warning message is shown if only
            poll component exist and no poll advance modules are present.
            Given I have created two poll components
            When I go to course outline
            Then I see poll deprecated warning
            And I see correct poll deprecated warning heading text
            And I don't see poll deprecated warning advance modules remove text
            And I see list of poll components with correct display names
        """
        self._create_deprecated_components()
        self.course_outline_page.visit()
        self._verify_deprecation_warning_info(
            deprecated_blocks_present=False,
            components_present=True,
            components_display_name_list=['Poll', 'Survey']
        )
@attr('shard_4')
class SelfPacedOutlineTest(CourseOutlineTest):
    """Test the course outline for a self-paced course."""

    # Every sibling test class in this module opts in to collection with
    # ``__test__ = True``; without it, this class inherits the base class's
    # collection flag and may be skipped. NOTE(review): confirm the base
    # class's ``__test__`` default.
    __test__ = True

    def populate_course_fixture(self, course_fixture):
        """
        Create a minimal chapter/sequential/vertical course, mark it
        self-paced with a start date in the future, and enable the
        self-paced configuration flag on the server.
        """
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', SECTION_NAME).add_children(
                XBlockFixtureDesc('sequential', SUBSECTION_NAME).add_children(
                    XBlockFixtureDesc('vertical', UNIT_NAME)
                )
            ),
        )
        self.course_fixture.add_course_details({
            'self_paced': True,
            'start_date': datetime.now() + timedelta(days=1)
        })
        ConfigModelFixture('/config/self_paced', {'enabled': True}).install()

    def test_release_dates_not_shown(self):
        """
        Scenario: Ensure that block release dates are not shown on the
            course outline page of a self-paced course.
            Given I am the author of a self-paced course
            When I go to the course outline
            Then I should not see release dates for course content
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        self.assertEqual(section.release_date, '')
        subsection = section.subsection(SUBSECTION_NAME)
        self.assertEqual(subsection.release_date, '')

    def test_edit_section_and_subsection(self):
        """
        Scenario: Ensure that block release/due dates are not shown
            in their settings modals.
            Given I am the author of a self-paced course
            When I go to the course outline
            And I click on settings for a section or subsection
            Then I should not see release or due date settings
        """
        self.course_outline_page.visit()
        section = self.course_outline_page.section(SECTION_NAME)
        modal = section.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
        modal.cancel()
        subsection = section.subsection(SUBSECTION_NAME)
        modal = subsection.edit()
        self.assertFalse(modal.has_release_date())
        self.assertFalse(modal.has_due_date())
| agpl-3.0 |
didzis/AMRParsing | stanfordnlp/unidecode/x057.py | 252 | 4631
data = (
'Guo ', # 0x00
'Yin ', # 0x01
'Hun ', # 0x02
'Pu ', # 0x03
'Yu ', # 0x04
'Han ', # 0x05
'Yuan ', # 0x06
'Lun ', # 0x07
'Quan ', # 0x08
'Yu ', # 0x09
'Qing ', # 0x0a
'Guo ', # 0x0b
'Chuan ', # 0x0c
'Wei ', # 0x0d
'Yuan ', # 0x0e
'Quan ', # 0x0f
'Ku ', # 0x10
'Fu ', # 0x11
'Yuan ', # 0x12
'Yuan ', # 0x13
'E ', # 0x14
'Tu ', # 0x15
'Tu ', # 0x16
'Tu ', # 0x17
'Tuan ', # 0x18
'Lue ', # 0x19
'Hui ', # 0x1a
'Yi ', # 0x1b
'Yuan ', # 0x1c
'Luan ', # 0x1d
'Luan ', # 0x1e
'Tu ', # 0x1f
'Ya ', # 0x20
'Tu ', # 0x21
'Ting ', # 0x22
'Sheng ', # 0x23
'Pu ', # 0x24
'Lu ', # 0x25
'Iri ', # 0x26
'Ya ', # 0x27
'Zai ', # 0x28
'Wei ', # 0x29
'Ge ', # 0x2a
'Yu ', # 0x2b
'Wu ', # 0x2c
'Gui ', # 0x2d
'Pi ', # 0x2e
'Yi ', # 0x2f
'Di ', # 0x30
'Qian ', # 0x31
'Qian ', # 0x32
'Zhen ', # 0x33
'Zhuo ', # 0x34
'Dang ', # 0x35
'Qia ', # 0x36
'Akutsu ', # 0x37
'Yama ', # 0x38
'Kuang ', # 0x39
'Chang ', # 0x3a
'Qi ', # 0x3b
'Nie ', # 0x3c
'Mo ', # 0x3d
'Ji ', # 0x3e
'Jia ', # 0x3f
'Zhi ', # 0x40
'Zhi ', # 0x41
'Ban ', # 0x42
'Xun ', # 0x43
'Tou ', # 0x44
'Qin ', # 0x45
'Fen ', # 0x46
'Jun ', # 0x47
'Keng ', # 0x48
'Tun ', # 0x49
'Fang ', # 0x4a
'Fen ', # 0x4b
'Ben ', # 0x4c
'Tan ', # 0x4d
'Kan ', # 0x4e
'Pi ', # 0x4f
'Zuo ', # 0x50
'Keng ', # 0x51
'Bi ', # 0x52
'Xing ', # 0x53
'Di ', # 0x54
'Jing ', # 0x55
'Ji ', # 0x56
'Kuai ', # 0x57
'Di ', # 0x58
'Jing ', # 0x59
'Jian ', # 0x5a
'Tan ', # 0x5b
'Li ', # 0x5c
'Ba ', # 0x5d
'Wu ', # 0x5e
'Fen ', # 0x5f
'Zhui ', # 0x60
'Po ', # 0x61
'Pan ', # 0x62
'Tang ', # 0x63
'Kun ', # 0x64
'Qu ', # 0x65
'Tan ', # 0x66
'Zhi ', # 0x67
'Tuo ', # 0x68
'Gan ', # 0x69
'Ping ', # 0x6a
'Dian ', # 0x6b
'Gua ', # 0x6c
'Ni ', # 0x6d
'Tai ', # 0x6e
'Pi ', # 0x6f
'Jiong ', # 0x70
'Yang ', # 0x71
'Fo ', # 0x72
'Ao ', # 0x73
'Liu ', # 0x74
'Qiu ', # 0x75
'Mu ', # 0x76
'Ke ', # 0x77
'Gou ', # 0x78
'Xue ', # 0x79
'Ba ', # 0x7a
'Chi ', # 0x7b
'Che ', # 0x7c
'Ling ', # 0x7d
'Zhu ', # 0x7e
'Fu ', # 0x7f
'Hu ', # 0x80
'Zhi ', # 0x81
'Chui ', # 0x82
'La ', # 0x83
'Long ', # 0x84
'Long ', # 0x85
'Lu ', # 0x86
'Ao ', # 0x87
'Tay ', # 0x88
'Pao ', # 0x89
'[?] ', # 0x8a
'Xing ', # 0x8b
'Dong ', # 0x8c
'Ji ', # 0x8d
'Ke ', # 0x8e
'Lu ', # 0x8f
'Ci ', # 0x90
'Chi ', # 0x91
'Lei ', # 0x92
'Gai ', # 0x93
'Yin ', # 0x94
'Hou ', # 0x95
'Dui ', # 0x96
'Zhao ', # 0x97
'Fu ', # 0x98
'Guang ', # 0x99
'Yao ', # 0x9a
'Duo ', # 0x9b
'Duo ', # 0x9c
'Gui ', # 0x9d
'Cha ', # 0x9e
'Yang ', # 0x9f
'Yin ', # 0xa0
'Fa ', # 0xa1
'Gou ', # 0xa2
'Yuan ', # 0xa3
'Die ', # 0xa4
'Xie ', # 0xa5
'Ken ', # 0xa6
'Jiong ', # 0xa7
'Shou ', # 0xa8
'E ', # 0xa9
'Ha ', # 0xaa
'Dian ', # 0xab
'Hong ', # 0xac
'Wu ', # 0xad
'Kua ', # 0xae
'[?] ', # 0xaf
'Tao ', # 0xb0
'Dang ', # 0xb1
'Kai ', # 0xb2
'Gake ', # 0xb3
'Nao ', # 0xb4
'An ', # 0xb5
'Xing ', # 0xb6
'Xian ', # 0xb7
'Huan ', # 0xb8
'Bang ', # 0xb9
'Pei ', # 0xba
'Ba ', # 0xbb
'Yi ', # 0xbc
'Yin ', # 0xbd
'Han ', # 0xbe
'Xu ', # 0xbf
'Chui ', # 0xc0
'Cen ', # 0xc1
'Geng ', # 0xc2
'Ai ', # 0xc3
'Peng ', # 0xc4
'Fang ', # 0xc5
'Que ', # 0xc6
'Yong ', # 0xc7
'Xun ', # 0xc8
'Jia ', # 0xc9
'Di ', # 0xca
'Mai ', # 0xcb
'Lang ', # 0xcc
'Xuan ', # 0xcd
'Cheng ', # 0xce
'Yan ', # 0xcf
'Jin ', # 0xd0
'Zhe ', # 0xd1
'Lei ', # 0xd2
'Lie ', # 0xd3
'Bu ', # 0xd4
'Cheng ', # 0xd5
'Gomi ', # 0xd6
'Bu ', # 0xd7
'Shi ', # 0xd8
'Xun ', # 0xd9
'Guo ', # 0xda
'Jiong ', # 0xdb
'Ye ', # 0xdc
'Nian ', # 0xdd
'Di ', # 0xde
'Yu ', # 0xdf
'Bu ', # 0xe0
'Ya ', # 0xe1
'Juan ', # 0xe2
'Sui ', # 0xe3
'Pi ', # 0xe4
'Cheng ', # 0xe5
'Wan ', # 0xe6
'Ju ', # 0xe7
'Lun ', # 0xe8
'Zheng ', # 0xe9
'Kong ', # 0xea
'Chong ', # 0xeb
'Dong ', # 0xec
'Dai ', # 0xed
'Tan ', # 0xee
'An ', # 0xef
'Cai ', # 0xf0
'Shu ', # 0xf1
'Beng ', # 0xf2
'Kan ', # 0xf3
'Zhi ', # 0xf4
'Duo ', # 0xf5
'Yi ', # 0xf6
'Zhi ', # 0xf7
'Yi ', # 0xf8
'Pei ', # 0xf9
'Ji ', # 0xfa
'Zhun ', # 0xfb
'Qi ', # 0xfc
'Sao ', # 0xfd
'Ju ', # 0xfe
'Ni ', # 0xff
)
| gpl-2.0 |
0asa/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157
"""
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
# Public names exported by ``from sklearn.covariance import *``;
# mirrors the imports above, kept in alphabetical order.
__all__ = ['EllipticEnvelope',
           'EmpiricalCovariance',
           'GraphLasso',
           'GraphLassoCV',
           'LedoitWolf',
           'MinCovDet',
           'OAS',
           'ShrunkCovariance',
           'empirical_covariance',
           'fast_mcd',
           'graph_lasso',
           'ledoit_wolf',
           'ledoit_wolf_shrinkage',
           'log_likelihood',
           'oas',
           'shrunk_covariance']
| bsd-3-clause |
trezorg/django | django/conf/locale/sr_Latn/formats.py | 655 | 1980 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Date/time *display* formats, in Django date-format syntax (see module
# header for the reference link).
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
# Monday (1) is the first day of the week.
FIRST_DAY_OF_WEEK = 1

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y.', '%d.%m.%y.',        # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',    # '25. 10. 2006.', '25. 10. 06.'
    '%Y-%m-%d',                      # '2006-10-25'
    # NOTE(review): month-name forms were left commented out in the
    # original -- confirm localized month-name parsing before enabling.
    # '%d. %b %y.', '%d. %B %y.',    # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.',  # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.',    # '25. Oct 2006.', '25. October 2006.'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',     # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y. %H:%M:%S',      # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M',         # '25.10.2006. 14:30'
    '%d.%m.%Y.',               # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',      # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M',         # '25.10.06. 14:30'
    '%d.%m.%y.',               # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',    # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M',       # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',             # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',    # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M',       # '25. 10. 06. 14:30'
    '%d. %m. %y.',             # '25. 10. 06.'
    '%Y-%m-%d %H:%M:%S',       # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',          # '2006-10-25 14:30'
    '%Y-%m-%d',                # '2006-10-25'
)
# Number formatting: comma as decimal separator, dot for thousands,
# digits grouped in threes (e.g. 1.234.567,89).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
billbonney/ardupilot | mk/VRBRAIN/Tools/genmsg/src/genmsg/msg_loader.py | 51 | 20963 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
"""
Loader for messages and :class:`MsgContext` that assumes a
dictionary-based search path scheme (keys are the package/namespace,
values are the paths). Compatible with ROS package system and other
possible layouts.
"""
import os
import sys
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
from . base import InvalidMsgSpec, log, SEP, COMMENTCHAR, CONSTCHAR, IODELIM, EXT_MSG, EXT_SRV
from . msgs import MsgSpec, TIME, TIME_MSG, DURATION, DURATION_MSG, HEADER, HEADER_FULL_NAME, \
is_builtin, is_valid_msg_field_name, is_valid_msg_type, bare_msg_type, is_valid_constant_type, \
Field, Constant, resolve_type
from . names import normalize_package_context, package_resource_name
from . srvs import SrvSpec
class MsgNotFound(Exception):
    """Raised when a .msg/.srv definition cannot be located on the search path."""
    pass
def get_msg_file(package, base_type, search_path, ext=EXT_MSG):
    """
    Determine the file system path for the specified ``.msg`` on
    *search_path*.

    :param package: name of package file is in, ``str``
    :param base_type: type name of message, e.g. 'Point2DFloat32', ``str``
    :param search_path: dictionary mapping message namespaces to a directory locations
    :param ext: msg file extension. Override with EXT_SRV to search for services instead.
    :returns: filesystem path of requested file, ``str``
    :raises: :exc:`MsgNotFound` If message cannot be located.
    """
    log("msg_file(%s, %s, %s)" % (package, base_type, str(search_path)))
    if not isinstance(search_path, dict):
        raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
    # PEP 8 membership test ('package not in' instead of 'not package in');
    # raising here also removes the need for the old redundant else branch.
    if package not in search_path:
        raise MsgNotFound("Cannot locate message [%s]: unknown package [%s] on search path [%s]"
                          % (base_type, package, search_path))
    # Return the first directory on the package's path that contains the file.
    for path_tmp in search_path[package]:
        path = os.path.join(path_tmp, "%s%s" % (base_type, ext))
        if os.path.isfile(path):
            return path
    raise MsgNotFound("Cannot locate message [%s] in package [%s] with paths [%s]" %
                      (base_type, package, str(search_path[package])))
def get_srv_file(package, base_type, search_path):
    """
    Determine the file system path for the specified .srv on path.

    Thin wrapper around :func:`get_msg_file` using the ``.srv`` extension.

    :param package: name of package ``.srv`` file is in, ``str``
    :param base_type: type name of service, e.g. 'Empty', ``str``
    :param search_path: dictionary mapping message namespaces to a directory locations
    :returns: file path of ``.srv`` file in specified package, ``str``
    :raises: :exc:`MsgNotFound` If service file cannot be located.
    """
    return get_msg_file(package, base_type, search_path, ext=EXT_SRV)
def load_msg_by_type(msg_context, msg_type, search_path):
    """
    Load message specification for specified type.

    NOTE: this will register the message in the *msg_context*.

    :param msg_context: :class:`MsgContext` for finding loaded dependencies
    :param msg_type: relative or full message type.
    :param search_path: dictionary mapping message namespaces to a directory locations
    :returns: :class:`MsgSpec` instance
    :raises: :exc:`MsgNotFound` If message cannot be located.
    """
    log("load_msg_by_type(%s, %s)" % (msg_type, str(search_path)))
    if not isinstance(search_path, dict):
        raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
    # 'Header' is a special alias; resolve it to its fully-qualified name.
    if msg_type == HEADER:
        msg_type = HEADER_FULL_NAME
    package_name, base_type = package_resource_name(msg_type)
    file_path = get_msg_file(package_name, base_type, search_path)
    log("file_path", file_path)
    spec = load_msg_from_file(msg_context, file_path, msg_type)
    # remember where the spec was loaded from for later lookups
    msg_context.set_file(msg_type, file_path)
    return spec
def load_srv_by_type(msg_context, srv_type, search_path):
    """
    Load service specification for specified type.

    NOTE: services are *never* registered in a :class:`MsgContext`.

    :param msg_context: :class:`MsgContext` for finding loaded dependencies
    :param srv_type: relative or full service type.
    :param search_path: dictionary mapping message namespaces to a directory locations
    :returns: :class:`SrvSpec` instance
    :raises: :exc:`MsgNotFound` If service cannot be located.
    """
    log("load_srv_by_type(%s, %s)" % (srv_type, str(search_path)))
    if not isinstance(search_path, dict):
        raise ValueError("search_path must be a dictionary of {namespace: dirpath}")
    package_name, base_type = package_resource_name(srv_type)
    file_path = get_srv_file(package_name, base_type, search_path)
    log("file_path", file_path)
    return load_srv_from_file(msg_context, file_path, srv_type)
def convert_constant_value(field_type, val):
    """
    Convert constant value declaration to python value. Does not do
    type-checking, so ValueError or other exceptions may be raised.

    :param field_type: ROS field type, ``str``
    :param val: string representation of constant, ``str``
    :raises: :exc:`ValueError` If unable to convert to python representation
    :raises: :exc:`InvalidMsgSpec` If value exceeds specified integer width
    """
    if field_type in ('float32', 'float64'):
        return float(val)
    elif field_type == 'string':
        return val.strip()  # string constants are always stripped
    elif field_type in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
                        'int64', 'uint64', 'char', 'byte'):
        # Width in bits of each fixed-width type; 'byte' and 'char' are
        # 8-bit aliases in ROS message definitions.
        widths = {'int8': 8, 'uint8': 8, 'int16': 16, 'uint16': 16,
                  'int32': 32, 'uint32': 32, 'int64': 64, 'uint64': 64,
                  'byte': 8, 'char': 8}
        b = widths[field_type]
        # Compute inclusive bounds with exact integer shifts. The previous
        # float-based math.pow() lost precision at 64 bits: for uint64 it
        # produced an upper bound of 2**64 instead of 2**64 - 1.
        if field_type[0] == 'u' or field_type == 'char':
            lower = 0
            upper = (1 << b) - 1
        else:
            upper = (1 << (b - 1)) - 1
            lower = -upper - 1  # two's complement min
        val = int(val)
        if val > upper or val < lower:
            raise InvalidMsgSpec("cannot coerce [%s] to %s (out of bounds)" % (val, field_type))
        return val
    elif field_type == 'bool':
        # TODO: need to nail down constant spec for bool
        # NOTE(review): eval() on .msg text is unsafe for untrusted input;
        # kept for backward compatibility with existing message files.
        return True if eval(val) else False
    raise InvalidMsgSpec("invalid constant type: [%s]" % field_type)
def _load_constant_line(orig_line):
    """
    Parse one constant declaration line of a .msg file into a :class:`Constant`.

    :param orig_line: original line text with comments intact, ``str``
    :returns: :class:`Constant` instance
    :raises: :exc:`InvalidMsgSpec`
    """
    clean_line = _strip_comments(orig_line)
    # split type/name, filter out empties
    line_splits = [s for s in [x.strip() for x in clean_line.split(" ")] if s]
    field_type = line_splits[0]
    if not is_valid_constant_type(field_type):
        raise InvalidMsgSpec("%s is not a legal constant type" % field_type)

    if field_type == 'string':
        # string constants contain everything to the right of the equals
        # sign; no comments are allowed in them.
        idx = orig_line.find(CONSTCHAR)
        name = orig_line[orig_line.find(' ') + 1:idx]
        val = orig_line[idx + 1:]
    else:
        # resplit the remainder on '='
        line_splits = [x.strip() for x in ' '.join(line_splits[1:]).split(CONSTCHAR)]
        if len(line_splits) != 2:
            # BUG FIX: this raise previously referenced the undefined name
            # 'l', so malformed declarations crashed with NameError instead
            # of the intended InvalidMsgSpec.
            raise InvalidMsgSpec("Invalid constant declaration: %s" % clean_line)
        name = line_splits[0]
        val = line_splits[1]

    try:
        val_converted = convert_constant_value(field_type, val)
    except Exception as e:
        raise InvalidMsgSpec("Invalid constant value: %s" % e)
    return Constant(field_type, name, val_converted, val.strip())
def _load_field_line(orig_line, package_context):
    """
    Parse a single field declaration line.

    :returns: (field_type, name) tuple, ``(str, str)``
    :raises: :exc:`InvalidMsgSpec`
    """
    cleaned = _strip_comments(orig_line)
    # tokenize on spaces, dropping empty fragments
    tokens = [tok for tok in (piece.strip() for piece in cleaned.split(" ")) if tok]
    if len(tokens) != 2:
        raise InvalidMsgSpec("Invalid declaration: %s" % (orig_line))
    field_type, name = tokens
    if not is_valid_msg_field_name(name):
        raise InvalidMsgSpec("%s is not a legal message field name" % name)
    if not is_valid_msg_type(field_type):
        raise InvalidMsgSpec("%s is not a legal message field type" % field_type)
    # Header always resolves to its fully-qualified name; unqualified
    # non-builtin types resolve within the current package context.
    if field_type == HEADER:
        field_type = HEADER_FULL_NAME
    elif package_context and SEP not in field_type:
        if not is_builtin(bare_msg_type(field_type)):
            field_type = "%s/%s" % (package_context, field_type)
    return field_type, name
def _strip_comments(line):
    """Return *line* with any trailing comment and surrounding whitespace removed."""
    head, _sep, _comment = line.partition(COMMENTCHAR)
    return head.strip()
def load_msg_from_string(msg_context, text, full_name):
    """
    Load message specification from a string.

    NOTE: this will register the message in the *msg_context*.

    :param msg_context: :class:`MsgContext` for finding loaded dependencies
    :param text: .msg text , ``str``
    :param full_name: full type name of the message, e.g. 'std_msgs/String'
    :returns: :class:`MsgSpec` specification
    :raises: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
    """
    log("load_msg_from_string", full_name)
    package_name, short_name = package_resource_name(full_name)
    types = []
    names = []
    constants = []
    for orig_line in text.split('\n'):
        clean_line = _strip_comments(orig_line)
        if not clean_line:
            continue  # ignore empty lines
        if CONSTCHAR in clean_line:
            # constant declaration, e.g. "int32 X=123"
            constants.append(_load_constant_line(orig_line))
        else:
            # field declaration, e.g. "string frame_id"
            field_type, name = _load_field_line(orig_line, package_name)
            types.append(field_type)
            names.append(name)
    spec = MsgSpec(types, names, constants, text, full_name, package_name)
    # register so dependent messages can resolve this type later
    msg_context.register(full_name, spec)
    return spec
def load_msg_from_file(msg_context, file_path, full_name):
    """
    Convert the .msg representation in the file to a :class:`MsgSpec` instance.

    NOTE: this will register the message in the *msg_context*.

    :param file_path: path of file to load from, ``str``
    :returns: :class:`MsgSpec` instance
    :raises: :exc:`InvalidMsgSpec` if syntax errors or other problems are detected in file
    """
    log("Load spec from", file_path)
    with open(file_path, 'r') as handle:
        contents = handle.read()
    try:
        return load_msg_from_string(msg_context, contents, full_name)
    except InvalidMsgSpec as err:
        # prefix the offending file path so the error is actionable
        raise InvalidMsgSpec('%s: %s' % (file_path, err))
def load_msg_depends(msg_context, spec, search_path):
    """
    Compute and store the list of message types that *spec* depends on.

    :param msg_context: :class:`MsgContext` instance to load dependencies into/from.
    :param spec: message to compute dependencies for, :class:`MsgSpec`/:class:`SrvSpec`
    :param search_path: dictionary mapping message namespaces to a directory locations
    :returns: list of first-order dependency names, ``[str]``
    :raises: :exc:`MsgNotFound` If dependency cannot be located.
    """
    package_context = spec.package
    log("load_msg_depends <spec>", spec.full_name, package_context)
    depends = []
    # Iterate over each field, loading as necessary
    for unresolved_type in spec.types:
        bare_type = bare_msg_type(unresolved_type)
        resolved_type = resolve_type(bare_type, package_context)
        if is_builtin(resolved_type):
            # builtins (int32, string, ...) have no spec to load
            continue

        # Retrieve the MsgSpec instance of the field
        if msg_context.is_registered(resolved_type):
            depspec = msg_context.get_registered(resolved_type)
        else:
            # load and register on demand
            depspec = load_msg_by_type(msg_context, resolved_type, search_path)
            msg_context.register(resolved_type, depspec)

        # Update dependencies
        depends.append(resolved_type)
        # - check to see if we have compute dependencies of field
        dep_dependencies = msg_context.get_depends(resolved_type)
        if dep_dependencies is None:
            # recurse to compute the field type's own dependencies
            load_msg_depends(msg_context, depspec, search_path)

    assert spec.full_name, "MsgSpec must have a properly set full name"
    msg_context.set_depends(spec.full_name, depends)
    # have to copy array in order to prevent inadvertent mutation (we've stored this list in set_dependencies)
    return depends[:]
def load_depends(msg_context, spec, msg_search_path):
    """
    Compute dependencies of *spec* and load their MsgSpec dependencies
    into *msg_context*.

    NOTE: *msg_search_path* is only for finding .msg files. ``.srv``
    files have a separate and distinct search path. As services
    cannot depend on other services, it is not necessary to provide
    the srv search path here.

    :param msg_context: :class:`MsgContext` instance to load dependencies into/from.
    :param spec: :class:`MsgSpec` or :class:`SrvSpec` instance to load dependencies for.
    :param msg_search_path: dictionary mapping message namespaces to a directory locations.
    :raises: :exc:`MsgNotFound` If dependency cannot be located.
    """
    # guard-clause dispatch; isinstance checks kept in the original order
    if isinstance(spec, MsgSpec):
        return load_msg_depends(msg_context, spec, msg_search_path)
    if isinstance(spec, SrvSpec):
        # a service depends on the union of its request and response deps
        deps = load_msg_depends(msg_context, spec.request, msg_search_path)
        deps.extend(load_msg_depends(msg_context, spec.response, msg_search_path))
        return deps
    raise ValueError("spec does not appear to be a message or service")
class MsgContext(object):
    """
    Context object for storing :class:`MsgSpec` instances and related
    metadata.

    NOTE: All APIs work on :class:`MsgSpec` instance information.
    Thus, for services, there is information for the request and
    response messages, but there is no direct information about the
    :class:`SrvSpec` instance.
    """

    def __init__(self):
        # {package_name: {base_type: MsgSpec}}
        self._registered_packages = {}
        # {full_msg_type: file path the spec was loaded from}
        self._files = {}
        # {full_msg_type: [direct first-order dependency type names]}
        self._dependencies = {}

    def set_file(self, full_msg_type, file_path):
        """Record the file *full_msg_type* was loaded from."""
        self._files[full_msg_type] = file_path

    def get_file(self, full_msg_type):
        """:returns: path the type was loaded from, or ``None`` if unknown."""
        return self._files.get(full_msg_type, None)

    def set_depends(self, full_msg_type, dependencies):
        """
        :param dependencies: direct first order
          dependencies for *full_msg_type*
        """
        log("set_depends", full_msg_type, dependencies)
        self._dependencies[full_msg_type] = dependencies

    def get_depends(self, full_msg_type):
        """
        :returns: List of dependencies for *full_msg_type*,
          only first order dependencies. ``None`` if not yet computed.
        """
        return self._dependencies.get(full_msg_type, None)

    def get_all_depends(self, full_msg_type):
        """
        :returns: flattened depth-first list of all transitive dependencies.
        :raises: :exc:`KeyError` if dependencies have not been computed yet.
        """
        all_deps = []
        depends = self.get_depends(full_msg_type)
        if depends is None:
            raise KeyError(full_msg_type)
        for d in depends:
            # append(d) instead of the old extend([d]): same result without
            # allocating a throwaway single-element list per dependency.
            all_deps.append(d)
            all_deps.extend(self.get_all_depends(d))
        return all_deps

    @staticmethod
    def create_default():
        """Create a context pre-loaded with the builtin time/duration specs."""
        msg_context = MsgContext()
        # register builtins (needed for serialization). builtins have no package.
        load_msg_from_string(msg_context, TIME_MSG, TIME)
        load_msg_from_string(msg_context, DURATION_MSG, DURATION)
        return msg_context

    def register(self, full_msg_type, msgspec):
        """Store *msgspec* under its package/base-type name."""
        full_msg_type = bare_msg_type(full_msg_type)
        package, base_type = package_resource_name(full_msg_type)
        if package not in self._registered_packages:
            self._registered_packages[package] = {}
        self._registered_packages[package][base_type] = msgspec

    def is_registered(self, full_msg_type):
        """
        :param full_msg_type: fully-resolved message type
        :returns: ``True`` if :class:`MsgSpec` instance has been loaded for the requested type.
        """
        full_msg_type = bare_msg_type(full_msg_type)
        package, base_type = package_resource_name(full_msg_type)
        if package in self._registered_packages:
            return base_type in self._registered_packages[package]
        else:
            return False

    def get_registered(self, full_msg_type):
        """
        :returns: registered :class:`MsgSpec` for *full_msg_type*
        :raises: :exc:`KeyError` If not registered
        """
        full_msg_type = bare_msg_type(full_msg_type)
        if self.is_registered(full_msg_type):
            package, base_type = package_resource_name(full_msg_type)
            return self._registered_packages[package][base_type]
        else:
            raise KeyError(full_msg_type)

    def __str__(self):
        return str(self._registered_packages)
def load_srv_from_string(msg_context, text, full_name):
    """
    Load :class:`SrvSpec` from .srv text.

    :param msg_context: :class:`MsgContext` instance to load request/response messages into.
    :param text: .srv text , ``str``
    :param full_name: full type name of the service, ``str``
    :returns: :class:`SrvSpec` instance
    :raises :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
    """
    text_in = StringIO()
    text_out = StringIO()
    accum = text_in
    for l in text.split('\n'):
        l = l.split(COMMENTCHAR)[0].strip()  # strip comments
        if l.startswith(IODELIM):  # lenient, by request
            # '---' separator: switch from accumulating request to response
            accum = text_out
        else:
            accum.write(l + '\n')

    # create separate MsgSpec objects for each half of file
    msg_in = load_msg_from_string(msg_context, text_in.getvalue(), '%sRequest' % (full_name))
    msg_out = load_msg_from_string(msg_context, text_out.getvalue(), '%sResponse' % (full_name))
    return SrvSpec(msg_in, msg_out, text, full_name)
def load_srv_from_file(msg_context, file_path, full_name):
    """
    Convert the .srv representation in the file to a :class:`SrvSpec` instance.

    :param msg_context: :class:`MsgContext` instance to load request/response messages into.
    :param file_path: path of file to load from, ``str``
    :returns: :class:`SrvSpec` instance
    :raise: :exc:`InvalidMsgSpec` If syntax errors or other problems are detected in file
    """
    log("Load spec from %s %s\n" % (file_path, full_name))
    with open(file_path, 'r') as handle:
        raw_text = handle.read()
    spec = load_srv_from_string(msg_context, raw_text, full_name)
    # both generated half-messages originate from the same .srv file
    for suffix in ('Request', 'Response'):
        msg_context.set_file('%s%s' % (full_name, suffix), file_path)
    return spec
| gpl-3.0 |
helldorado/ansible | lib/ansible/module_utils/network/routeros/routeros.py | 38 | 5341 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# (c) 2016 Red Hat Inc.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.network.common.utils import to_list, ComplexList
from ansible.module_utils.connection import Connection, ConnectionError
_DEVICE_CONFIGS = {}
routeros_provider_spec = {
'host': dict(),
'port': dict(type='int'),
'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
'timeout': dict(type='int')
}
routeros_argument_spec = {}
def get_provider_argspec():
    """Return the provider argument spec shared by RouterOS network modules."""
    return routeros_provider_spec
def get_connection(module):
    """Return the persistent Connection for *module*, creating and caching it
    on first use.

    Fails the module (via ``fail_json``) when the negotiated network_api is
    anything other than 'cliconf'.
    """
    if hasattr(module, '_routeros_connection'):
        return module._routeros_connection

    capabilities = get_capabilities(module)
    network_api = capabilities.get('network_api')
    if network_api == 'cliconf':
        module._routeros_connection = Connection(module._socket_path)
    else:
        # fail_json does not return in normal module execution, so the
        # final return is only reached on the cliconf path.
        module.fail_json(msg='Invalid connection type %s' % network_api)

    return module._routeros_connection
def get_capabilities(module):
    """Fetch (and cache on *module*) the device capabilities dictionary."""
    if not hasattr(module, '_routeros_capabilities'):
        # first call: ask the connection plugin and memoize the parsed result
        raw = Connection(module._socket_path).get_capabilities()
        module._routeros_capabilities = json.loads(raw)
    return module._routeros_capabilities
def get_defaults_flag(module):
    """Return the flag list to use when requesting the default configuration."""
    connection = get_connection(module)
    try:
        output = connection.get('/system default-configuration print')
    except ConnectionError as exc:
        module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))

    output = to_text(output, errors='surrogate_then_replace')

    # collect the first token of every non-blank output line
    leading_words = {line.strip().split()[0]
                     for line in output.splitlines() if line.strip()}

    return ['all'] if 'all' in leading_words else ['full']
def get_config(module, flags=None):
    """Return the device configuration, cached per flag combination."""
    flag_str = ' '.join(to_list(flags))
    # module-level cache keyed by the joined flag string
    if flag_str not in _DEVICE_CONFIGS:
        connection = get_connection(module)
        try:
            out = connection.get_config(flags=flags)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))
        _DEVICE_CONFIGS[flag_str] = to_text(out, errors='surrogate_then_replace').strip()
    return _DEVICE_CONFIGS[flag_str]
def to_commands(module, commands):
    """Normalize *commands* into command/prompt/answer dictionaries."""
    command_spec = {
        'command': dict(key=True),
        'prompt': dict(),
        'answer': dict(),
    }
    return ComplexList(command_spec, module)(commands)
def run_commands(module, commands, check_rc=True):
    """Execute *commands* on the device and return their decoded output.

    :param module: AnsibleModule instance
    :param commands: command strings, or dicts with command/prompt/answer keys
        (as produced by :func:`to_commands`)
    :param check_rc: unused here; kept for interface parity with other
        platform module_utils
    :returns: list of response strings, one per command
    """
    responses = list()
    connection = get_connection(module)

    for cmd in to_list(commands):
        if isinstance(cmd, dict):
            # dict form carries optional interactive prompt/answer handling
            command = cmd['command']
            prompt = cmd['prompt']
            answer = cmd['answer']
        else:
            command = cmd
            prompt = None
            answer = None

        try:
            out = connection.get(command, prompt, answer)
        except ConnectionError as exc:
            module.fail_json(msg=to_text(exc, errors='surrogate_then_replace'))

        try:
            out = to_text(out, errors='surrogate_or_strict')
        except UnicodeError:
            module.fail_json(
                msg=u'Failed to decode output from %s: %s' % (cmd, to_text(out)))

        responses.append(out)

    return responses
def load_config(module, commands):
    """Push *commands* to the device via the connection plugin's edit_config.

    NOTE(review): the edit_config response is captured but never returned;
    callers appear to ignore it -- confirm before relying on a return value.
    """
    connection = get_connection(module)

    out = connection.edit_config(commands)
| gpl-3.0 |
nerzhul/ansible | lib/ansible/playbook/task.py | 17 | 16666 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.compat.six import iteritems, string_types
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils._text import to_native
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping, AnsibleUnicode
from ansible.plugins import lookup_loader
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.become import Become
from ansible.playbook.block import Block
from ansible.playbook.conditional import Conditional
from ansible.playbook.loop_control import LoopControl
from ansible.playbook.role import Role
from ansible.playbook.taggable import Taggable
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['Task']
class Task(Base, Conditional, Taggable, Become):
"""
A task is a language feature that represents a call to a module, with given arguments and other parameters.
A handler is a subclass of a task.
Usage:
Task.load(datastructure) -> Task
Task.something(...)
"""
# =================================================================================
# ATTRIBUTES
# load_<attribute_name> and
# validate_<attribute_name>
# will be used if defined
# might be possible to define others
_args = FieldAttribute(isa='dict', default=dict())
_action = FieldAttribute(isa='string')
_any_errors_fatal = FieldAttribute(isa='bool')
_async = FieldAttribute(isa='int', default=0)
_changed_when = FieldAttribute(isa='list', default=[])
_delay = FieldAttribute(isa='int', default=5)
_delegate_to = FieldAttribute(isa='string')
_delegate_facts = FieldAttribute(isa='bool', default=False)
_failed_when = FieldAttribute(isa='list', default=[])
_loop = FieldAttribute(isa='string', private=True, inherit=False)
_loop_args = FieldAttribute(isa='list', private=True, inherit=False)
_loop_control = FieldAttribute(isa='class', class_type=LoopControl, inherit=False)
_name = FieldAttribute(isa='string', default='')
_notify = FieldAttribute(isa='list')
_poll = FieldAttribute(isa='int')
_register = FieldAttribute(isa='string')
_retries = FieldAttribute(isa='int')
_until = FieldAttribute(isa='list', default=[])
    def __init__(self, block=None, role=None, task_include=None):
        '''Construct a task; without the Task.load classmethod it will be pretty blank.'''

        self._role = role
        self._parent = None

        # a task include takes precedence over the containing block as parent
        if task_include:
            self._parent = task_include
        else:
            self._parent = block

        super(Task, self).__init__()
def get_path(self):
''' return the absolute path of the task with its line number '''
path = ""
if hasattr(self, '_ds') and hasattr(self._ds, '_data_source') and hasattr(self._ds, '_line_number'):
path = "%s:%s" % (self._ds._data_source, self._ds._line_number)
return path
    def get_name(self):
        ''' return the name of the task '''

        if self._role and self.name and ("%s : " % self._role._role_name) not in self.name:
            # prefix with the role name unless it is already embedded
            return "%s : %s" % (self._role.get_name(), self.name)
        elif self.name:
            return self.name
        else:
            if self._role:
                # unnamed task inside a role: fall back to "<role> : <action>"
                return "%s : %s" % (self._role.get_name(), self.action)
            else:
                return "%s" % (self.action,)
def _merge_kv(self, ds):
if ds is None:
return ""
elif isinstance(ds, string_types):
return ds
elif isinstance(ds, dict):
buf = ""
for (k,v) in iteritems(ds):
if k.startswith('_'):
continue
buf = buf + "%s=%s " % (k,v)
buf = buf.strip()
return buf
    @staticmethod
    def load(data, block=None, role=None, task_include=None, variable_manager=None, loader=None):
        '''Factory: construct a Task and populate it from a raw datastructure.'''
        t = Task(block=block, role=role, task_include=task_include)
        return t.load_data(data, variable_manager=variable_manager, loader=loader)
def __repr__(self):
''' returns a human readable representation of the task '''
if self.get_name() == 'meta':
return "TASK: meta (%s)" % self.args['_raw_params']
else:
return "TASK: %s" % self.get_name()
    def _preprocess_loop(self, ds, new_ds, k, v):
        ''' take a lookup plugin name and store it correctly '''

        # 'with_items' -> lookup plugin 'items', etc.
        loop_name = k.replace("with_", "")
        if new_ds.get('loop') is not None:
            raise AnsibleError("duplicate loop in task: %s" % loop_name, obj=ds)
        if v is None:
            raise AnsibleError("you must specify a value when using %s" % k, obj=ds)
        new_ds['loop'] = loop_name
        new_ds['loop_args'] = v
    def preprocess_data(self, ds):
        '''
        tasks are especially complex arguments so need pre-processing.
        keep it short.
        '''

        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure suitable for the
        # attributes of the task class
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            # preserve file/line position info for error reporting
            new_ds.ansible_pos = ds.ansible_pos

        # use the args parsing class to determine the action, args,
        # and the delegate_to value from the various possible forms
        # supported as legacy
        args_parser = ModuleArgsParser(task_ds=ds)
        try:
            (action, args, delegate_to) = args_parser.parse()
        except AnsibleParserError as e:
            raise AnsibleParserError(to_native(e), obj=ds)

        # the command/shell/script modules used to support the `cmd` arg,
        # which corresponds to what we now call _raw_params, so move that
        # value over to _raw_params (assuming it is empty)
        if action in ('command', 'shell', 'script'):
            if 'cmd' in args:
                if args.get('_raw_params', '') != '':
                    raise AnsibleError("The 'cmd' argument cannot be used when other raw parameters are specified."
                                       " Please put everything in one or the other place.", obj=ds)
                args['_raw_params'] = args.pop('cmd')

        new_ds['action'] = action
        new_ds['args'] = args
        new_ds['delegate_to'] = delegate_to

        # we handle any 'vars' specified in the ds here, as we may
        # be adding things to them below (special handling for includes).
        # When that deprecated feature is removed, this can be too.
        if 'vars' in ds:
            # _load_vars is defined in Base, and is used to load a dictionary
            # or list of dictionaries in a standard way
            new_ds['vars'] = self._load_vars(None, ds.get('vars'))
        else:
            new_ds['vars'] = dict()

        for (k, v) in iteritems(ds):
            if k in ('action', 'local_action', 'args', 'delegate_to') or k == action or k == 'shell':
                # we don't want to re-assign these values, which were
                # determined by the ModuleArgsParser() above
                continue
            elif k.replace("with_", "") in lookup_loader:
                # 'with_<lookup>' loop syntax
                self._preprocess_loop(ds, new_ds, k, v)
            else:
                # pre-2.0 syntax allowed variables for include statements at the
                # top level of the task, so we move those into the 'vars' dictionary
                # here, and show a deprecation message as we will remove this at
                # some point in the future.
                if action == 'include' and k not in self._valid_attrs and k not in self.DEPRECATED_ATTRIBUTES:
                    display.deprecated("Specifying include variables at the top-level of the task is deprecated."
                                       " Please see:\nhttp://docs.ansible.com/ansible/playbooks_roles.html#task-include-files-and-encouraging-reuse\n\n"
                                       " for currently supported syntax regarding included files and variables")
                    new_ds['vars'][k] = v
                else:
                    new_ds[k] = v

        return super(Task, self).preprocess_data(new_ds)
def _load_loop_control(self, attr, ds):
    """Build a LoopControl object from the raw ``loop_control`` data.

    ``loop_control`` must be a literal mapping at parse time; a bare
    variable cannot be resolved this early, so anything else is rejected.
    """
    if isinstance(ds, dict):
        return LoopControl.load(data=ds, variable_manager=self._variable_manager, loader=self._loader)
    raise AnsibleParserError(
        "the `loop_control` value must be specified as a dictionary and cannot "
        "be a variable itself (though it can contain variables)",
        obj=ds,
    )
def post_validate(self, templar):
    """Finalize this task's attributes.

    First post-validates the parent block / task include (when one
    exists) that this task belongs to, then delegates to the base class
    for the task itself.
    """
    parent = self._parent
    if parent:
        parent.post_validate(templar)
    super(Task, self).post_validate(templar)
def _post_validate_loop_args(self, attr, value, templar):
'''
Override post validation for the loop args field, which is templated
specially in the TaskExecutor class when evaluating loops.
'''
return value
def _post_validate_environment(self, attr, value, templar):
    '''
    Override post validation of vars on the play, as we don't want to
    template these too early.

    :param attr: name of the attribute being validated ('environment')
    :param value: raw environment value (None, string, list or dict)
    :param templar: Templar used to resolve bare variable references
    :returns: dict of templated environment entries, or the templated
              value itself when a plain string was supplied
    '''
    if value is None:
        return dict()
    elif isinstance(value, list):
        if len(value) == 1:
            return templar.template(value[0], convert_bare=True)
        else:
            # BUG FIX: this accumulator was previously a list (`env = []`),
            # so the key-style assignments below raised TypeError, and the
            # computed result was never returned at all.
            env = dict()
            for env_item in value:
                if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables:
                    env[env_item] = templar.template(env_item, convert_bare=False)
            return env
    elif isinstance(value, dict):
        env = dict()
        for env_item in value:
            if isinstance(env_item, (string_types, AnsibleUnicode)) and env_item in templar._available_variables:
                env[env_item] = templar.template(value[env_item], convert_bare=False)
        # BUG FIX: previously fell through to re-template the whole dict,
        # discarding the entries templated above.
        return env
    # at this point it should be a simple string
    return templar.template(value, convert_bare=True)
def _post_validate_changed_when(self, attr, value, templar):
'''
changed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def _post_validate_failed_when(self, attr, value, templar):
'''
failed_when is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def _post_validate_until(self, attr, value, templar):
'''
until is evaluated after the execution of the task is complete,
and should not be templated during the regular post_validate step.
'''
return value
def get_vars(self):
    """Collect the variables for this task, merging in parent vars first
    so the task's own entries win on conflict.

    The 'tags' and 'when' keys are stripped from the result since they
    are task directives, not variables.
    """
    merged = dict()
    if self._parent:
        merged.update(self._parent.get_vars())
    merged.update(self.vars)
    for directive in ('tags', 'when'):
        merged.pop(directive, None)
    return merged
def get_include_params(self):
    """Return the parameter vars contributed by include/include_role
    tasks, merged with anything the parent chain contributed.
    """
    params = dict()
    if self._parent:
        params.update(self._parent.get_include_params())
    # only include-style actions pass their vars down as parameters
    if self.action in ('include', 'include_role'):
        params.update(self.vars)
    return params
def copy(self, exclude_parent=False, exclude_tasks=False):
    """Return a copy of this task.

    The parent chain is copied too unless ``exclude_parent`` is set;
    the role reference (if any) is shared, not copied.
    """
    duplicate = super(Task, self).copy()
    if self._parent and not exclude_parent:
        duplicate._parent = self._parent.copy(exclude_tasks=exclude_tasks)
    else:
        duplicate._parent = None
    duplicate._role = self._role if self._role else None
    return duplicate
def serialize(self):
    """Serialize the task to a dict, including parent and role info
    unless the task has already been squashed or finalized.
    """
    data = super(Task, self).serialize()
    if self._squashed or self._finalized:
        return data
    if self._parent:
        data['parent'] = self._parent.serialize()
        # record the concrete class so deserialize() can rebuild it
        data['parent_type'] = self._parent.__class__.__name__
    if self._role:
        data['role'] = self._role.serialize()
    return data
def deserialize(self, data):
    """Restore task state from a serialized dict (see serialize()).

    Rebuilds the parent object (Block / TaskInclude / HandlerTaskInclude)
    and the role, then delegates the remaining fields to the base class.
    """
    # import is here to avoid import loops
    from ansible.playbook.task_include import TaskInclude
    from ansible.playbook.handler_task_include import HandlerTaskInclude

    parent_data = data.get('parent', None)
    if parent_data:
        parent_type = data.get('parent_type')
        if parent_type == 'Block':
            p = Block()
        elif parent_type == 'TaskInclude':
            p = TaskInclude()
        elif parent_type == 'HandlerTaskInclude':
            p = HandlerTaskInclude()
        else:
            # BUG FIX: previously an unknown type fell through with `p`
            # unbound, raising a confusing NameError on corrupt data.
            raise AnsibleParserError("unknown parent type while deserializing task: %s" % parent_type)
        p.deserialize(parent_data)
        self._parent = p
        del data['parent']

    role_data = data.get('role')
    if role_data:
        r = Role()
        r.deserialize(role_data)
        self._role = r
        del data['role']

    super(Task, self).deserialize(data)
def set_loader(self, loader):
    """Attach *loader* to this task and propagate it up the parent chain.

    Primarily needed after a serialize/deserialize round-trip, which does
    not preserve the loader.
    """
    self._loader = loader
    parent = self._parent
    if parent:
        parent.set_loader(loader)
def _get_parent_attribute(self, attr, extend=False, prepend=False):
'''
Generic logic to get the attribute or parent attribute for a task value.
'''
value = None
try:
value = self._attributes[attr]
if self._parent and (value is None or extend):
parent_value = getattr(self._parent, attr, None)
if extend:
value = self._extend_value(value, parent_value, prepend)
else:
value = parent_value
except KeyError:
pass
return value
def _get_attr_environment(self):
    '''
    Override for the 'environment' getattr fetcher, used from Base.
    Extends (merges) the parent's environment with this task's own.
    '''
    return self._get_parent_attribute('environment', extend=True)
def get_dep_chain(self):
    """Return the role dependency chain from the parent chain, or None
    when this task has no parent.
    """
    if not self._parent:
        return None
    return self._parent.get_dep_chain()
def get_search_path(self):
    """
    Return the list of paths you should search for files, in order.
    This follows role/playbook dependency chain.
    """
    search_paths = []
    dep_chain = self.get_dep_chain()
    if dep_chain:
        # inside a role: walk the dependency chain from current to dependent
        search_paths.extend(x._role_path for x in reversed(dep_chain))
    # finally the directory of the task file itself, unless already present
    own_dir = os.path.dirname(self.get_path())
    if own_dir not in search_paths:
        search_paths.append(own_dir)
    return search_paths
def all_parents_static(self):
    """Return True when every ancestor include in the parent chain is
    static (a task with no parent trivially qualifies).
    """
    parent = self._parent
    return parent.all_parents_static() if parent else True
| gpl-3.0 |
mpvoss/RickAndMortyWeatherTweets | env/lib/python3.5/site-packages/urllib3/fields.py | 288 | 5943 | from __future__ import absolute_import
import email.utils
import mimetypes
from .packages import six
def guess_content_type(filename, default='application/octet-stream'):
    """
    Guess the "Content-Type" of a file.

    :param filename:
        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
    :param default:
        If no "Content-Type" can be guessed, default to `default`.
    """
    if not filename:
        return default
    guessed, _encoding = mimetypes.guess_type(filename)
    return guessed or default
def format_header_param(name, value):
    """
    Helper function to format and quote a single header parameter.

    Particularly useful for header parameters which might contain
    non-ASCII values, like file names. This follows RFC 2231, as
    suggested by RFC 2388 Section 4.4.

    :param name:
        The name of the parameter, a string expected to be ASCII only.
    :param value:
        The value of the parameter, provided as a unicode string.
    """
    needs_rfc2231 = any(ch in value for ch in '"\\\r\n')
    if not needs_rfc2231:
        candidate = '%s="%s"' % (name, value)
        try:
            candidate.encode('ascii')
        except (UnicodeEncodeError, UnicodeDecodeError):
            # non-ASCII value: fall through to RFC 2231 encoding below
            pass
        else:
            return candidate
    if not six.PY3 and isinstance(value, six.text_type):  # Python 2:
        value = value.encode('utf-8')
    encoded = email.utils.encode_rfc2231(value, 'utf-8')
    return '%s*=%s' % (name, encoded)
class RequestField(object):
    """
    A data container for request body parameters.

    :param name:
        The name of this request field.
    :param data:
        The data/value body.
    :param filename:
        An optional filename of the request field.
    :param headers:
        An optional dict-like object of headers to initially use for the field.
    """

    def __init__(self, name, data, filename=None, headers=None):
        self._name = name
        self._filename = filename
        self.data = data
        # Copy the caller's headers so later mutation (e.g. make_multipart)
        # does not affect the dict that was passed in.
        self.headers = {}
        if headers:
            self.headers = dict(headers)

    @classmethod
    def from_tuples(cls, fieldname, value):
        """
        A :class:`~urllib3.fields.RequestField` factory from old-style tuple parameters.

        Supports constructing :class:`~urllib3.fields.RequestField` from
        parameter of key/value strings AND key/filetuple. A filetuple is a
        (filename, data, MIME type) tuple where the MIME type is optional.
        For example::

            'foo': 'bar',
            'fakefile': ('foofile.txt', 'contents of foofile'),
            'realfile': ('barfile.txt', open('realfile').read()),
            'typedfile': ('bazfile.bin', open('bazfile').read(), 'image/jpeg'),
            'nonamefile': 'contents of nonamefile field',

        Field names and filenames must be unicode.
        """
        if isinstance(value, tuple):
            if len(value) == 3:
                filename, data, content_type = value
            else:
                # 2-tuple: guess the MIME type from the filename.
                filename, data = value
                content_type = guess_content_type(filename)
        else:
            # Plain key/value string: no filename, no explicit type.
            filename = None
            content_type = None
            data = value

        request_param = cls(fieldname, data, filename=filename)
        request_param.make_multipart(content_type=content_type)

        return request_param

    def _render_part(self, name, value):
        """
        Overridable helper function to format a single header parameter.

        :param name:
            The name of the parameter, a string expected to be ASCII only.
        :param value:
            The value of the parameter, provided as a unicode string.
        """
        return format_header_param(name, value)

    def _render_parts(self, header_parts):
        """
        Helper function to format and quote a single header.

        Useful for single headers that are composed of multiple items. E.g.,
        'Content-Disposition' fields.

        :param header_parts:
            A sequence of (k, v) tuples or a :class:`dict` of (k, v) to format
            as `k1="v1"; k2="v2"; ...`.
        """
        parts = []
        iterable = header_parts
        if isinstance(header_parts, dict):
            iterable = header_parts.items()

        for name, value in iterable:
            # None-valued parts (e.g. a missing filename) are skipped entirely.
            if value is not None:
                parts.append(self._render_part(name, value))

        return '; '.join(parts)

    def render_headers(self):
        """
        Renders the headers for this request field.
        """
        lines = []

        # Emit the well-known headers first, in a fixed order...
        sort_keys = ['Content-Disposition', 'Content-Type', 'Content-Location']
        for sort_key in sort_keys:
            if self.headers.get(sort_key, False):
                lines.append('%s: %s' % (sort_key, self.headers[sort_key]))

        # ...then any remaining custom headers in dict order.
        for header_name, header_value in self.headers.items():
            if header_name not in sort_keys:
                if header_value:
                    lines.append('%s: %s' % (header_name, header_value))

        # The trailing '\r\n' element produces the blank line that
        # terminates the header block once joined.
        lines.append('\r\n')
        return '\r\n'.join(lines)

    def make_multipart(self, content_disposition=None, content_type=None,
                       content_location=None):
        """
        Makes this request field into a multipart request field.

        This method overrides "Content-Disposition", "Content-Type" and
        "Content-Location" headers to the request parameter.

        :param content_disposition:
            The 'Content-Disposition' of the request body; defaults to 'form-data'.
        :param content_type:
            The 'Content-Type' of the request body.
        :param content_location:
            The 'Content-Location' of the request body.
        """
        self.headers['Content-Disposition'] = content_disposition or 'form-data'
        # Joining with a leading '' places '; ' before the rendered parts.
        self.headers['Content-Disposition'] += '; '.join([
            '', self._render_parts(
                (('name', self._name), ('filename', self._filename))
            )
        ])
        self.headers['Content-Type'] = content_type
        self.headers['Content-Location'] = content_location
| mit |
espdev/readthedocs.org | readthedocs/bookmarks/models.py | 34 | 1190 | from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, ugettext
from readthedocs.builds.models import Version
from readthedocs.projects.models import Project
class Bookmark(models.Model):
    # A page a user has bookmarked inside a project's documentation,
    # pinned to a particular built version.

    user = models.ForeignKey(User, verbose_name=_('User'),
                             related_name='bookmarks')
    project = models.ForeignKey(Project, verbose_name=_('Project'),
                                related_name='bookmarks')
    version = models.ForeignKey(Version, verbose_name=_('Version'),
                                related_name='bookmarks')
    # Relative page path within the built documentation.
    page = models.CharField(_('Page'), max_length=255)
    date = models.DateTimeField(_('Date'), auto_now_add=True)
    url = models.CharField(_('URL'), max_length=255, null=True, blank=True)

    class Meta:
        ordering = ['-date']
        # A user may bookmark a given page of a given version only once.
        unique_together = ('user', 'project', 'version', 'page')

    def __unicode__(self):
        # Python 2-style string representation used by this Django version.
        return ugettext(u"Bookmark %(url)s for %(user)s (%(pk)s)") % {
            'url': self.url,
            'user': self.user,
            'pk': self.pk,
        }

    def get_absolute_url(self):
        # The bookmark simply points at its stored URL.
        return self.url
| mit |
rdkdd/tp-spice | spice/tests/qxl_log.py | 1 | 1405 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
"""Examine Xorg log in guest for qxl presence.
"""
import logging
from spice.lib import act
from spice.lib import stest
from spice.lib import utils
logger = logging.getLogger(__name__)
def run(vt_test, test_params, env):
    """Inspects Xorg logs for QXL presence.

    Greps the guest's Xorg log for any mention of the QXL driver and
    fails the test when nothing is found.

    Parameters
    ----------
    vt_test : avocado.core.plugins.vt.VirtTest
        QEMU test object.
    test_params : virttest.utils_params.Params
        Dictionary with the test parameters.
    env : virttest.utils_env.Env
        Dictionary with test environment.

    Raises
    ------
    TestFail
        Test fails for some reason.
    """
    guest_test = stest.GuestTest(vt_test, test_params, env)
    act.x_active(guest_test.vmi)
    grep_cmd = utils.Cmd("grep", "-i", "qxl", guest_test.cfg.qxl_log)
    status, out = act.rstatus(guest_test.vmi, grep_cmd)
    assert status == 0
    act.info(guest_test.vmi, "Mention about qxl: %s." % out)
| gpl-2.0 |
tomchristie/django | tests/auth_tests/test_forms.py | 13 | 35929 | import datetime
import re
from unittest import mock
from django import forms
from django.contrib.auth.forms import (
AdminPasswordChangeForm, AuthenticationForm, PasswordChangeForm,
PasswordResetForm, ReadOnlyPasswordHashField, ReadOnlyPasswordHashWidget,
SetPasswordForm, UserChangeForm, UserCreationForm,
)
from django.contrib.auth.models import User
from django.contrib.auth.signals import user_login_failed
from django.contrib.sites.models import Site
from django.core import mail
from django.core.mail import EmailMultiAlternatives
from django.forms.fields import CharField, Field, IntegerField
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils import translation
from django.utils.text import capfirst
from django.utils.translation import gettext as _
from .models.custom_user import (
CustomUser, CustomUserWithoutIsActiveField, ExtensionUser,
)
from .models.with_custom_email_field import CustomEmailField
from .models.with_integer_username import IntegerUsernameUser
from .settings import AUTH_TEMPLATES
class TestDataMixin:
    # Shared fixture users covering the password states the tests below
    # exercise: normal, inactive, staff, and empty/unmanageable/unknown
    # password formats.

    @classmethod
    def setUpTestData(cls):
        cls.u1 = User.objects.create_user(username='testclient', password='password', email='testclient@example.com')
        cls.u2 = User.objects.create_user(username='inactive', password='password', is_active=False)
        cls.u3 = User.objects.create_user(username='staff', password='password')
        # created via create() (not create_user) so the raw password string
        # is stored as-is, producing deliberately broken hash formats
        cls.u4 = User.objects.create(username='empty_password', password='')
        cls.u5 = User.objects.create(username='unmanageable_password', password='$')
        cls.u6 = User.objects.create(username='unknown_password', password='foo$bar')
class UserCreationFormTest(TestDataMixin, TestCase):
    # Tests for django.contrib.auth.forms.UserCreationForm.

    def test_user_already_exists(self):
        # Duplicate usernames are rejected with the model's 'unique' error.
        data = {
            'username': 'testclient',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["username"].errors,
                         [str(User._meta.get_field('username').error_messages['unique'])])

    def test_invalid_data(self):
        # '!' is not allowed by the username field's 'invalid' validator.
        data = {
            'username': 'jsmith!',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [str(validator.message)])

    def test_password_verification(self):
        # The verification password is incorrect.
        data = {
            'username': 'jsmith',
            'password1': 'test123',
            'password2': 'test',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["password2"].errors,
                         [str(form.error_messages['password_mismatch'])])

    def test_both_passwords(self):
        # One (or both) passwords weren't given
        data = {'username': 'jsmith'}
        form = UserCreationForm(data)
        required_error = [str(Field.default_error_messages['required'])]
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, required_error)
        data['password2'] = 'test123'
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['password1'].errors, required_error)
        self.assertEqual(form['password2'].errors, [])

    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        # The success case.
        data = {
            'username': 'jsmith@example.com',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        # password_changed fires only on a committing save().
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        u = form.save()
        self.assertEqual(password_changed.call_count, 1)
        self.assertEqual(repr(u), '<User: jsmith@example.com>')

    def test_unicode_username(self):
        data = {
            'username': '宝',
            'password1': 'test123',
            'password2': 'test123',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        u = form.save()
        self.assertEqual(u.username, '宝')

    def test_normalize_username(self):
        # The normalization happens in AbstractBaseUser.clean() and ModelForm
        # validation calls Model.clean().
        ohm_username = 'testΩ'  # U+2126 OHM SIGN
        data = {
            'username': ohm_username,
            'password1': 'pwd2',
            'password2': 'pwd2',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        user = form.save()
        self.assertNotEqual(user.username, ohm_username)
        self.assertEqual(user.username, 'testΩ')  # U+03A9 GREEK CAPITAL LETTER OMEGA

    def test_duplicate_normalized_unicode(self):
        """
        To prevent almost identical usernames, visually identical but differing
        by their unicode code points only, Unicode NFKC normalization should
        make appear them equal to Django.
        """
        omega_username = 'iamtheΩ'  # U+03A9 GREEK CAPITAL LETTER OMEGA
        ohm_username = 'iamtheΩ'  # U+2126 OHM SIGN
        self.assertNotEqual(omega_username, ohm_username)
        User.objects.create_user(username=omega_username, password='pwd')
        data = {
            'username': ohm_username,
            'password1': 'pwd2',
            'password2': 'pwd2',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['username'], ["A user with that username already exists."]
        )

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
            'min_length': 12,
        }},
    ])
    def test_validates_password(self):
        data = {
            'username': 'testclient',
            'password1': 'testclient',
            'password2': 'testclient',
        }
        form = UserCreationForm(data)
        self.assertFalse(form.is_valid())
        # Both configured validators should reject this password.
        self.assertEqual(len(form['password2'].errors), 2)
        self.assertIn('The password is too similar to the username.', form['password2'].errors)
        self.assertIn(
            'This password is too short. It must contain at least 12 characters.',
            form['password2'].errors
        )

    def test_custom_form(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = ExtensionUser
                fields = UserCreationForm.Meta.fields + ('date_of_birth',)

        data = {
            'username': 'testclient',
            'password1': 'testclient',
            'password2': 'testclient',
            'date_of_birth': '1988-02-24',
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_custom_form_with_different_username_field(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = CustomUser
                fields = ('email', 'date_of_birth')

        data = {
            'email': 'test@client222.com',
            'password1': 'testclient',
            'password2': 'testclient',
            'date_of_birth': '1988-02-24',
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_custom_form_hidden_username_field(self):
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = CustomUserWithoutIsActiveField
                fields = ('email',)  # without USERNAME_FIELD

        data = {
            'email': 'testclient@example.com',
            'password1': 'testclient',
            'password2': 'testclient',
        }
        form = CustomUserCreationForm(data)
        self.assertTrue(form.is_valid())

    def test_password_whitespace_not_stripped(self):
        data = {
            'username': 'testuser',
            'password1': ' testpassword ',
            'password2': ' testpassword ',
        }
        form = UserCreationForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password1'], data['password1'])
        self.assertEqual(form.cleaned_data['password2'], data['password2'])

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    ])
    def test_password_help_text(self):
        form = UserCreationForm()
        self.assertEqual(
            form.fields['password1'].help_text,
            '<ul><li>Your password can&#39;t be too similar to your other personal information.</li></ul>'
        )

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
    ])
    def test_user_create_form_validates_password_with_all_data(self):
        """UserCreationForm password validation uses all of the form's data."""
        class CustomUserCreationForm(UserCreationForm):
            class Meta(UserCreationForm.Meta):
                model = User
                fields = ('username', 'email', 'first_name', 'last_name')
        form = CustomUserCreationForm({
            'username': 'testuser',
            'password1': 'testpassword',
            'password2': 'testpassword',
            'first_name': 'testpassword',
            'last_name': 'lastname',
        })
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.errors['password2'],
            ['The password is too similar to the first name.'],
        )
# To verify that the login form rejects inactive users, use an authentication
# backend that allows them.
@override_settings(AUTHENTICATION_BACKENDS=['django.contrib.auth.backends.AllowAllUsersModelBackend'])
class AuthenticationFormTest(TestDataMixin, TestCase):
    # Tests for django.contrib.auth.forms.AuthenticationForm.

    def test_invalid_username(self):
        # The user submits an invalid username.
        data = {
            'username': 'jsmith_does_not_exist',
            'password': 'test123',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form.non_field_errors(), [
                form.error_messages['invalid_login'] % {
                    'username': User._meta.get_field('username').verbose_name
                }
            ]
        )

    def test_inactive_user(self):
        # The user is inactive.
        data = {
            'username': 'inactive',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])

    def test_login_failed(self):
        # A failed login fires user_login_failed with the originating request.
        signal_calls = []

        def signal_handler(**kwargs):
            signal_calls.append(kwargs)

        user_login_failed.connect(signal_handler)
        fake_request = object()
        try:
            form = AuthenticationForm(fake_request, {
                'username': 'testclient',
                'password': 'incorrect',
            })
            self.assertFalse(form.is_valid())
            self.assertIs(signal_calls[0]['request'], fake_request)
        finally:
            user_login_failed.disconnect(signal_handler)

    def test_inactive_user_i18n(self):
        with self.settings(USE_I18N=True), translation.override('pt-br', deactivate=True):
            # The user is inactive.
            data = {
                'username': 'inactive',
                'password': 'password',
            }
            form = AuthenticationForm(None, data)
            self.assertFalse(form.is_valid())
            self.assertEqual(form.non_field_errors(), [str(form.error_messages['inactive'])])

    def test_custom_login_allowed_policy(self):
        # The user is inactive, but our custom form policy allows them to log in.
        data = {
            'username': 'inactive',
            'password': 'password',
        }

        class AuthenticationFormWithInactiveUsersOkay(AuthenticationForm):
            def confirm_login_allowed(self, user):
                pass

        form = AuthenticationFormWithInactiveUsersOkay(None, data)
        self.assertTrue(form.is_valid())

        # If we want to disallow some logins according to custom logic,
        # we should raise a django.forms.ValidationError in the form.
        class PickyAuthenticationForm(AuthenticationForm):
            def confirm_login_allowed(self, user):
                if user.username == "inactive":
                    raise forms.ValidationError("This user is disallowed.")
                raise forms.ValidationError("Sorry, nobody's allowed in.")

        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ['This user is disallowed.'])
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = PickyAuthenticationForm(None, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form.non_field_errors(), ["Sorry, nobody's allowed in."])

    def test_success(self):
        # The success case
        data = {
            'username': 'testclient',
            'password': 'password',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    def test_unicode_username(self):
        User.objects.create_user(username='Σαρα', password='pwd')
        data = {
            'username': 'Σαρα',
            'password': 'pwd',
        }
        form = AuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.non_field_errors(), [])

    def test_username_field_label(self):

        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label="Name", max_length=75)

        form = CustomAuthenticationForm()
        self.assertEqual(form['username'].label, "Name")

    def test_username_field_label_not_set(self):

        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField()

        form = CustomAuthenticationForm()
        username_field = User._meta.get_field(User.USERNAME_FIELD)
        self.assertEqual(form.fields['username'].label, capfirst(username_field.verbose_name))

    def test_username_field_label_empty_string(self):

        class CustomAuthenticationForm(AuthenticationForm):
            username = CharField(label='')

        form = CustomAuthenticationForm()
        self.assertEqual(form.fields['username'].label, "")

    def test_password_whitespace_not_stripped(self):
        data = {
            'username': 'testuser',
            'password': ' pass ',
        }
        form = AuthenticationForm(None, data)
        form.is_valid()  # Not necessary to have valid credentials for the test.
        self.assertEqual(form.cleaned_data['password'], data['password'])

    @override_settings(AUTH_USER_MODEL='auth_tests.IntegerUsernameUser')
    def test_integer_username(self):
        class CustomAuthenticationForm(AuthenticationForm):
            username = IntegerField()

        user = IntegerUsernameUser.objects.create_user(username=0, password='pwd')
        data = {
            'username': 0,
            'password': 'pwd',
        }
        form = CustomAuthenticationForm(None, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['username'], data['username'])
        self.assertEqual(form.cleaned_data['password'], data['password'])
        self.assertEqual(form.errors, {})
        self.assertEqual(form.user_cache, user)
class SetPasswordFormTest(TestDataMixin, TestCase):
    # Tests for django.contrib.auth.forms.SetPasswordForm.

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(
            form["new_password2"].errors,
            [str(form.error_messages['password_mismatch'])]
        )

    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
        # password_changed fires only on a committing save().
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
            'min_length': 12,
        }},
    ])
    def test_validates_password(self):
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': 'testclient',
            'new_password2': 'testclient',
        }
        form = SetPasswordForm(user, data)
        self.assertFalse(form.is_valid())
        # Both configured validators should reject this password.
        self.assertEqual(len(form["new_password2"].errors), 2)
        self.assertIn('The password is too similar to the username.', form["new_password2"].errors)
        self.assertIn(
            'This password is too short. It must contain at least 12 characters.',
            form["new_password2"].errors
        )

    def test_password_whitespace_not_stripped(self):
        user = User.objects.get(username='testclient')
        data = {
            'new_password1': ' password ',
            'new_password2': ' password ',
        }
        form = SetPasswordForm(user, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
        self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])

    @override_settings(AUTH_PASSWORD_VALIDATORS=[
        {'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator'},
        {'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', 'OPTIONS': {
            'min_length': 12,
        }},
    ])
    def test_help_text_translation(self):
        french_help_texts = [
            'Votre mot de passe ne peut pas trop ressembler à vos autres informations personnelles.',
            'Votre mot de passe doit contenir au minimum 12 caractères.',
        ]
        form = SetPasswordForm(self.u1)
        with translation.override('fr'):
            html = form.as_p()
            for french_text in french_help_texts:
                self.assertIn(french_text, html)
class PasswordChangeFormTest(TestDataMixin, TestCase):
    # Tests for django.contrib.auth.forms.PasswordChangeForm.

    def test_incorrect_password(self):
        # The old password supplied does not match the user's password.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'test',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["old_password"].errors, [str(form.error_messages['password_incorrect'])])

    def test_password_verification(self):
        # The two new passwords do not match.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc',
        }
        form = PasswordChangeForm(user, data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form["new_password2"].errors, [str(form.error_messages['password_mismatch'])])

    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        # The success case.
        user = User.objects.get(username='testclient')
        data = {
            'old_password': 'password',
            'new_password1': 'abc123',
            'new_password2': 'abc123',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        # password_changed fires only on a committing save().
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)

    def test_field_order(self):
        # Regression test - check the order of fields:
        user = User.objects.get(username='testclient')
        self.assertEqual(list(PasswordChangeForm(user, {}).fields), ['old_password', 'new_password1', 'new_password2'])

    def test_password_whitespace_not_stripped(self):
        user = User.objects.get(username='testclient')
        user.set_password(' oldpassword ')
        data = {
            'old_password': ' oldpassword ',
            'new_password1': ' pass ',
            'new_password2': ' pass ',
        }
        form = PasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['old_password'], data['old_password'])
        self.assertEqual(form.cleaned_data['new_password1'], data['new_password1'])
        self.assertEqual(form.cleaned_data['new_password2'], data['new_password2'])
class UserChangeFormTest(TestDataMixin, TestCase):
    # Tests for django.contrib.auth.forms.UserChangeForm.

    def test_username_validity(self):
        user = User.objects.get(username='testclient')
        data = {'username': 'not valid'}
        form = UserChangeForm(data, instance=user)
        self.assertFalse(form.is_valid())
        validator = next(v for v in User._meta.get_field('username').validators if v.code == 'invalid')
        self.assertEqual(form["username"].errors, [str(validator.message)])

    def test_bug_14242(self):
        # A regression test, introduced by adding an optimization for the
        # UserChangeForm.

        class MyUserForm(UserChangeForm):
            def __init__(self, *args, **kwargs):
                super().__init__(*args, **kwargs)
                self.fields['groups'].help_text = 'These groups give users different permissions'

            class Meta(UserChangeForm.Meta):
                fields = ('groups',)

        # Just check we can create it
        MyUserForm({})

    def test_unusable_password(self):
        user = User.objects.get(username='empty_password')
        user.set_unusable_password()
        user.save()
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_empty_password(self):
        user = User.objects.get(username='empty_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("No password set."), form.as_table())

    def test_bug_17944_unmanageable_password(self):
        user = User.objects.get(username='unmanageable_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())

    def test_bug_17944_unknown_password_algorithm(self):
        user = User.objects.get(username='unknown_password')
        form = UserChangeForm(instance=user)
        self.assertIn(_("Invalid password format or unknown hashing algorithm."), form.as_table())

    def test_bug_19133(self):
        "The change form does not return the password value"
        # Use the form to construct the POST data
        user = User.objects.get(username='testclient')
        form_for_data = UserChangeForm(instance=user)
        post_data = form_for_data.initial

        # The password field should be readonly, so anything
        # posted here should be ignored; the form will be
        # valid, and give back the 'initial' value for the
        # password field.
        post_data['password'] = 'new password'
        form = UserChangeForm(instance=user, data=post_data)

        self.assertTrue(form.is_valid())
        # original hashed password contains $
        self.assertIn('$', form.cleaned_data['password'])

    def test_bug_19349_bound_password_field(self):
        user = User.objects.get(username='testclient')
        form = UserChangeForm(data={}, instance=user)
        # When rendering the bound password field,
        # ReadOnlyPasswordHashWidget needs the initial
        # value to render correctly
        self.assertEqual(form.initial['password'], form['password'].value())

    def test_custom_form(self):
        class CustomUserChangeForm(UserChangeForm):
            class Meta(UserChangeForm.Meta):
                model = ExtensionUser
                fields = ('username', 'password', 'date_of_birth',)

        user = User.objects.get(username='testclient')
        data = {
            'username': 'testclient',
            'password': 'testclient',
            'date_of_birth': '1998-02-24',
        }
        form = CustomUserChangeForm(data, instance=user)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.cleaned_data['username'], 'testclient')
        self.assertEqual(form.cleaned_data['date_of_birth'], datetime.date(1998, 2, 24))
@override_settings(TEMPLATES=AUTH_TEMPLATES)
class PasswordResetFormTest(TestDataMixin, TestCase):
    """Tests for django.contrib.auth.forms.PasswordResetForm."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # This cleanup is necessary because contrib.sites cache
        # makes tests interfere with each other, see #11505
        Site.objects.clear_cache()

    def create_dummy_user(self):
        """
        Create a user and return a tuple (user_object, username, email).
        """
        username = 'jsmith'
        email = 'jsmith@example.com'
        user = User.objects.create_user(username, email, 'test123')
        return (user, username, email)

    def test_invalid_email(self):
        data = {'email': 'not valid'}
        form = PasswordResetForm(data)
        self.assertFalse(form.is_valid())
        self.assertEqual(form['email'].errors, [_('Enter a valid email address.')])

    def test_nonexistent_email(self):
        """
        Test nonexistent email address. This should not fail because it would
        expose information about registered users.
        """
        data = {'email': 'foo@bar.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        self.assertEqual(len(mail.outbox), 0)

    def test_cleaned_data(self):
        (user, username, email) = self.create_dummy_user()
        data = {'email': email}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        form.save(domain_override='example.com')
        self.assertEqual(form.cleaned_data['email'], email)
        self.assertEqual(len(mail.outbox), 1)

    def test_custom_email_subject(self):
        data = {'email': 'testclient@example.com'}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Custom password reset on example.com')

    def test_custom_email_constructor(self):
        data = {'email': 'testclient@example.com'}

        class CustomEmailPasswordResetForm(PasswordResetForm):
            def send_mail(self, subject_template_name, email_template_name,
                          context, from_email, to_email,
                          html_email_template_name=None):
                EmailMultiAlternatives(
                    "Forgot your password?",
                    "Sorry to hear you forgot your password.",
                    None, [to_email],
                    ['site_monitor@example.com'],
                    headers={'Reply-To': 'webmaster@example.com'},
                    alternatives=[
                        ("Really sorry to hear you forgot your password.", "text/html")
                    ],
                ).send()

        form = CustomEmailPasswordResetForm(data)
        self.assertTrue(form.is_valid())
        # Since we're not providing a request object, we must provide a
        # domain_override to prevent the save operation from failing in the
        # potential case where contrib.sites is not installed. Refs #16412.
        form.save(domain_override='example.com')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, 'Forgot your password?')
        self.assertEqual(mail.outbox[0].bcc, ['site_monitor@example.com'])
        self.assertEqual(mail.outbox[0].content_subtype, "plain")

    def test_preserve_username_case(self):
        """
        Preserve the case of the user name (before the @ in the email address)
        when creating a user (#5605).
        """
        user = User.objects.create_user('forms_test2', 'tesT@EXAMple.com', 'test')
        self.assertEqual(user.email, 'tesT@example.com')
        user = User.objects.create_user('forms_test3', 'tesT', 'test')
        self.assertEqual(user.email, 'tesT')

    def test_inactive_user(self):
        """
        Inactive user cannot receive password reset email.
        """
        (user, username, email) = self.create_dummy_user()
        user.is_active = False
        user.save()
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_unusable_password(self):
        user = User.objects.create_user('testuser', 'test@example.com', 'test')
        data = {"email": "test@example.com"}
        form = PasswordResetForm(data)
        self.assertTrue(form.is_valid())
        user.set_unusable_password()
        user.save()
        form = PasswordResetForm(data)
        # The form itself is valid, but no email is sent
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 0)

    def test_save_plaintext_email(self):
        """
        Test the PasswordResetForm.save() method with no html_email_template_name
        parameter passed in.
        Test to ensure original behavior is unchanged after the parameter was added.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(mail.outbox), 1)
        message = mail.outbox[0].message()
        self.assertFalse(message.is_multipart())
        self.assertEqual(message.get_content_type(), 'text/plain')
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(mail.outbox[0].alternatives), 0)
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w+/-]', message.get_payload()))

    def test_save_html_email_template_name(self):
        """
        Test the PasswordResetForm.save() method with html_email_template_name
        parameter specified.
        Test to ensure that a multipart email is sent with both text/plain
        and text/html parts.
        """
        (user, username, email) = self.create_dummy_user()
        form = PasswordResetForm({"email": email})
        self.assertTrue(form.is_valid())
        form.save(html_email_template_name='registration/html_password_reset_email.html')
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(len(mail.outbox[0].alternatives), 1)
        message = mail.outbox[0].message()
        self.assertEqual(message.get('subject'), 'Custom password reset on example.com')
        self.assertEqual(len(message.get_payload()), 2)
        self.assertTrue(message.is_multipart())
        self.assertEqual(message.get_payload(0).get_content_type(), 'text/plain')
        self.assertEqual(message.get_payload(1).get_content_type(), 'text/html')
        self.assertEqual(message.get_all('to'), [email])
        self.assertTrue(re.match(r'^http://example.com/reset/[\w/-]+', message.get_payload(0).get_payload()))
        self.assertTrue(re.match(
            r'^<html><a href="http://example.com/reset/[\w/-]+/">Link</a></html>$',
            message.get_payload(1).get_payload()
        ))

    @override_settings(AUTH_USER_MODEL='auth_tests.CustomEmailField')
    def test_custom_email_field(self):
        # A custom user model whose EMAIL_FIELD isn't named 'email' still works.
        email = 'test@mail.com'
        CustomEmailField.objects.create_user('test name', 'test password', email)
        form = PasswordResetForm({'email': email})
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(form.cleaned_data['email'], email)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, [email])
class ReadOnlyPasswordHashTest(SimpleTestCase):
    """Tests for ReadOnlyPasswordHashWidget/Field used by UserChangeForm."""

    def test_bug_19349_render_with_none_value(self):
        # Rendering the widget with value set to None
        # mustn't raise an exception.
        widget = ReadOnlyPasswordHashWidget()
        html = widget.render(name='password', value=None, attrs={})
        self.assertIn(_("No password set."), html)

    @override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.PBKDF2PasswordHasher'])
    def test_render(self):
        widget = ReadOnlyPasswordHashWidget()
        value = 'pbkdf2_sha256$100000$a6Pucb1qSFcD$WmCkn9Hqidj48NVe5x0FEM6A9YiOqQcl/83m2Z5udm0='
        self.assertHTMLEqual(
            widget.render('name', value, {'id': 'id_password'}),
            """
            <div id="id_password">
                <strong>algorithm</strong>: pbkdf2_sha256
                <strong>iterations</strong>: 100000
                <strong>salt</strong>: a6Pucb******
                <strong>hash</strong>: WmCkn9**************************************
            </div>
            """
        )

    def test_readonly_field_has_changed(self):
        # The read-only field never reports a change, so posted values
        # can't overwrite the stored hash.
        field = ReadOnlyPasswordHashField()
        self.assertFalse(field.has_changed('aaa', 'bbb'))
class AdminPasswordChangeFormTest(TestDataMixin, TestCase):
    """Tests for the admin's AdminPasswordChangeForm."""

    @mock.patch('django.contrib.auth.password_validation.password_changed')
    def test_success(self, password_changed):
        user = User.objects.get(username='testclient')
        data = {
            'password1': 'test123',
            'password2': 'test123',
        }
        form = AdminPasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        # password_changed must only fire on a committing save().
        form.save(commit=False)
        self.assertEqual(password_changed.call_count, 0)
        form.save()
        self.assertEqual(password_changed.call_count, 1)

    def test_password_whitespace_not_stripped(self):
        # Whitespace-padded passwords are preserved verbatim.
        user = User.objects.get(username='testclient')
        data = {
            'password1': ' pass ',
            'password2': ' pass ',
        }
        form = AdminPasswordChangeForm(user, data)
        self.assertTrue(form.is_valid())
        self.assertEqual(form.cleaned_data['password1'], data['password1'])
        self.assertEqual(form.cleaned_data['password2'], data['password2'])
| bsd-3-clause |
arctelix/django-avatar | avatar/templatetags/avatar_tags.py | 1 | 3536 | import urllib
import urlparse
import hashlib
from django import template
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from avatar.settings import (AVATAR_GRAVATAR_BACKUP, AVATAR_GRAVATAR_DEFAULT,
AVATAR_DEFAULT_SIZE, AVATAR_GRAVATAR_BASE_URL)
from avatar.util import (
get_primary_avatar, get_default_avatar_url, cache_result, User, get_user)
from avatar.models import Avatar
register = template.Library()
@cache_result
@register.simple_tag
def avatar_url(user, size=AVATAR_DEFAULT_SIZE):
    """Return the URL of ``user``'s primary avatar thumbnail at ``size`` px.

    Falls back to the configured default avatar URL when the user has no
    primary avatar. A missing thumbnail is generated on demand.
    """
    avatar = get_primary_avatar(user, size=size)
    if avatar:
        if not avatar.thumbnail_exists(size):
            avatar.create_thumbnail(size)
        url = avatar.avatar_url(size)
        return url
    return get_default_avatar_url(user, size)
@cache_result
@register.simple_tag
def avatar(user, size=AVATAR_DEFAULT_SIZE, template=None, **kwargs):
    """Render an avatar for ``user`` (a User instance or a username).

    With ``template='img'`` a bare <img> tag is returned directly; otherwise
    the given template (default ``avatar/avatar_tag.html``) is rendered with
    ``user``, ``url``, ``alt``, ``size`` and any extra keyword args in context.
    """
    if not isinstance(user, User):
        try:
            # Look the user up by name; unknown users get the default avatar.
            user = get_user(user)
            alt = unicode(user)  # NOTE(review): Python 2 builtin — this module predates py3
            url = avatar_url(user, size)
        except User.DoesNotExist:
            url = get_default_avatar_url()
            alt = _("Default Avatar")
    else:
        alt = unicode(user)
        url = avatar_url(user, size)
    context = dict(kwargs, **{
        'user': user,
        'url': url,
        'alt': alt,
        'size': size,
    })
    if template == 'img':
        # NOTE(review): extra kwargs collected in `context` are ignored on
        # this shortcut path — only url/alt/size are used.
        return """<img src="%s" alt="%s" width="%s" height="%s" />""" % (url, alt,
            size, size)
    if not template:
        template = 'avatar/avatar_tag.html'
    return render_to_string(template, context)
@register.filter
def has_avatar(user):
    """Template filter: True when *user* is a real User with a primary Avatar."""
    return isinstance(user, User) and \
        Avatar.objects.filter(user=user, primary=True).exists()
@cache_result
@register.simple_tag
def primary_avatar(user, size=AVATAR_DEFAULT_SIZE):
    """
    This tag tries to get the default avatar for a user without doing any db
    requests. It achieves this by linking to a special view that will do all
    the work for us. If that special view is then cached by a CDN for
    instance, we will avoid many db calls.
    """
    alt = unicode(user)  # NOTE(review): Python 2 builtin
    # Point at the avatar_render_primary view instead of resolving the
    # avatar here — no database access in this tag.
    url = reverse('avatar_render_primary', kwargs={'user': user, 'size': size})
    return """<img src="%s" alt="%s" width="%s" height="%s" />""" % (url, alt,
        size, size)
@cache_result
@register.simple_tag
def render_avatar(avatar, size=AVATAR_DEFAULT_SIZE):
    """Render an <img> tag for the given Avatar instance at *size* pixels."""
    url = avatar.avatar_url(size)
    return """<img src="%s" alt="%s" width="%s" height="%s" />""" % (
        url, str(avatar), size, size)
def primary_avatar_object(parser, token):
    """Compile the ``{% primary_avatar_object user as var %}`` template tag."""
    bits = token.split_contents()
    # Expect exactly: tag-name, user, "as", variable-name.
    if len(bits) != 4:
        raise template.TemplateSyntaxError('%r tag takes three arguments.' % bits[0])
    return UsersAvatarObjectNode(bits[1], bits[3])
class UsersAvatarObjectNode(template.Node):
    """Template node that stores a user's primary Avatar in a context variable."""

    def __init__(self, user, key):
        self.user = template.Variable(user)
        self.key = key

    def render(self, context):
        """Resolve the user and bind their primary Avatar (or None) to the key."""
        owner = self.user.resolve(context)
        primary = Avatar.objects.filter(user=owner, primary=True)
        context[self.key] = primary[0] if primary else None
        return u""
register.tag('primary_avatar_object', primary_avatar_object)
@cache_result
@register.simple_tag
def avatar_choice_url(avatar, size=AVATAR_DEFAULT_SIZE):
    """Return the thumbnail URL of an Avatar instance at ``size`` pixels."""
    return avatar.avatar_url(size)
| bsd-3-clause |
robbielynch/RoblySearch | robly_parser/query_parser.py | 1 | 4160 | import re
from stemming.porter2 import stem
from robly_data import stop_words
class QueryParser(object):
    """
    The QueryParser class parses the context and the search query from the
    string typed by the user, e.g. "site:example query text".
    """

    # NOTE(review): these class-level attributes are shared defaults; the two
    # lists are mutable and would be shared across instances if mutated in
    # place — TODO confirm they are only ever rebound, never appended to.
    search_query = ""
    search_context = ""
    initial_search_query = ""
    search_tokens = []
    stemmed_tokens = []

    # Available context prefixes ("<context>:<query>")
    site = "site"
    image = "image"
    document = "doc"

    def __init__(self, initial_search_query=""):
        # Lowercased up front so the context prefix matches case-insensitively.
        self.initial_search_query = initial_search_query.lower()

    def extract_context_and_search_query(self):
        """
        Extracts search context and search query from the query.
        Search query then broken down into:
            - Context
            - Search Query
        Returns a (search_query, search_context) tuple; context is "" when
        absent or not one of the recognized prefixes.
        """
        if ":" in self.initial_search_query:
            colon_index = self.initial_search_query.index(':')
            print("colon index = ", colon_index)
            # NOTE(review): index() can only return >= 0 once the "in" check
            # above succeeded, so the else-branch paired with this if is
            # unreachable dead code.
            if colon_index >= 0:
                rest_of_query = self.initial_search_query[colon_index + 1:]
                if len(rest_of_query) > 0:
                    #Get the query without the context
                    self.search_query = prune_string(rest_of_query)
                    #Get context
                    self.search_context = self.initial_search_query[:colon_index]
                    if self.search_context == self.site or self.search_context == self.image or self.search_context == self.document:
                        self.search_context = self.search_context
                        return (self.search_query, self.search_context)
                    else:
                        # Unknown prefix: drop the context.
                        self.search_context = ""
                        return (self.search_query, self.search_context)
                else:
                    # Query was just "<context>:" — treat the whole input as
                    # the query (prune_string strips the colon itself).
                    self.search_context = ""
                    self.search_query = prune_string(self.initial_search_query)
                    return (self.search_query, self.search_context)
            else:
                # Unreachable (see NOTE above); kept for compatibility.
                #no context
                #search normally
                self.search_context = ""
                self.search_query = prune_string(self.search_query)
                return (self.search_query, self.search_context)
        else:
            self.search_query = prune_string(self.initial_search_query)
            return (self.search_query, self.search_context)
def prune_string(string):
    """
    Normalize a query string for indexing/search.

    Pipeline: strip unwanted characters, lowercase, tokenise, stem each
    token, drop stop words, and join back into a lowercase string.

    Params: string The string that is to be pruned
    Returns: string The pruned string
    """
    tokens = tokenise_string(remove_unwanted_chars(string).lower())
    tokens = remove_stop_words(stem_token_list(tokens))
    return tokens_to_string(tokens).lower()
def remove_unwanted_chars(string):
    """
    Remove unwanted/unnecessary punctuation characters from a string.
    Letters, digits, whitespace, apostrophes and hyphens are kept.
    """
    unwanted = ',!.;:#)(]|"$%^&_<>~=[\\{}'
    return ''.join(ch for ch in string if ch not in unwanted)
def tokenise_string(string):
    """Split the passed string on runs of whitespace into a list of tokens."""
    return string.split(None)
def stem_token_list(words):
    """
    Reduce each word to its stem with the Porter2 stemming algorithm,
    removing suffixes (and in some cases prefixes) to find the root word.
    """
    return [stem(word) for word in words]
def remove_stop_words(tokens):
    """
    Remove every imported stop word from the token list.
    Note: surviving tokens are lowercased as a side effect of
    remove_values_from_list.
    """
    remaining = tokens
    for stop in stop_words.STOP_WORDS:
        remaining = remove_values_from_list(remaining, stop)
    return remaining
def remove_values_from_list(the_list, value_to_be_removed):
    """
    Return a lowercased copy of the_list with every exact occurrence of
    value_to_be_removed removed (comparison happens before lowercasing).
    """
    return [item.lower() for item in the_list if item != value_to_be_removed]
def tokens_to_string(tokens):
    """Join the tokens back into one space-separated string."""
    return ' '.join(tokens)
soarpenguin/ansible | lib/ansible/module_utils/facts/namespace.py | 60 | 1377 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
class FactNamespace:
    """Base fact namespace: applies no transformation to fact names."""

    def __init__(self, namespace_name):
        self.namespace_name = namespace_name

    def transform(self, name):
        '''Take a text name, and transforms it as needed (add a namespace prefix, etc)'''
        return name

    def _underscore(self, name):
        # Fact names conventionally use underscores; normalize dashed names.
        return name.replace('-', '_')
class PrefixFactNamespace(FactNamespace):
    """Fact namespace that prepends a fixed prefix to every fact name."""

    def __init__(self, namespace_name, prefix=None):
        super(PrefixFactNamespace, self).__init__(namespace_name)
        self.prefix = prefix

    def transform(self, name):
        """Return *name* with dashes underscored and the prefix prepended."""
        return '%s%s' % (self.prefix, self._underscore(name))
| gpl-3.0 |
w1ll1am23/home-assistant | homeassistant/components/aemet/abstract_aemet_sensor.py | 3 | 1822 | """Abstraction form AEMET OpenData sensors."""
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import ATTRIBUTION, SENSOR_DEVICE_CLASS, SENSOR_NAME, SENSOR_UNIT
from .weather_update_coordinator import WeatherUpdateCoordinator
class AbstractAemetSensor(CoordinatorEntity, SensorEntity):
    """Abstract class for an AEMET OpenData sensor."""

    def __init__(
        self,
        name,
        unique_id,
        sensor_type,
        sensor_configuration,
        coordinator: WeatherUpdateCoordinator,
    ):
        """Initialize the sensor.

        sensor_configuration is a mapping that must contain SENSOR_NAME and
        may contain SENSOR_UNIT and SENSOR_DEVICE_CLASS.
        """
        super().__init__(coordinator)
        self._name = name
        self._unique_id = unique_id
        self._sensor_type = sensor_type
        self._sensor_name = sensor_configuration[SENSOR_NAME]
        # Unit and device class are optional per sensor type.
        self._unit_of_measurement = sensor_configuration.get(SENSOR_UNIT)
        self._device_class = sensor_configuration.get(SENSOR_DEVICE_CLASS)

    @property
    def name(self):
        """Return the name of the sensor."""
        return f"{self._name} {self._sensor_name}"

    @property
    def unique_id(self):
        """Return a unique_id for this entity."""
        return self._unique_id

    @property
    def attribution(self):
        """Return the attribution."""
        return ATTRIBUTION

    @property
    def device_class(self):
        """Return the device_class."""
        return self._device_class

    @property
    def unit_of_measurement(self):
        """Return the unit of measurement of this entity, if any."""
        return self._unit_of_measurement

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return {ATTR_ATTRIBUTION: ATTRIBUTION}
| apache-2.0 |
KaranToor/MA450 | google-cloud-sdk/.install/.backup/platform/gsutil/third_party/apitools/apitools/base/py/base_cli.py | 21 | 4990 | #!/usr/bin/env python
#
# Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base script for generated CLI."""
import atexit
import code
import logging
import os
import readline
import rlcompleter
import sys
from google.apputils import appcommands
import gflags as flags
from apitools.base.py import encoding
from apitools.base.py import exceptions
__all__ = [
'ConsoleWithReadline',
'DeclareBaseFlags',
'FormatOutput',
'SetupLogger',
'run_main',
]
# TODO(craigcitro): We should move all the flags for the
# StandardQueryParameters into this file, so that they can be used
# elsewhere easily.
_BASE_FLAGS_DECLARED = False
_OUTPUT_FORMATTER_MAP = {
'protorpc': lambda x: x,
'json': encoding.MessageToJson,
}
def DeclareBaseFlags():
    """Declare base flags for all CLIs.

    Safe to call more than once: gflags rejects duplicate definitions, so a
    module-level guard makes this idempotent.
    """
    # TODO(craigcitro): FlagValidators?
    global _BASE_FLAGS_DECLARED  # pylint: disable=global-statement
    if _BASE_FLAGS_DECLARED:
        return
    flags.DEFINE_boolean(
        'log_request', False,
        'Log requests.')
    flags.DEFINE_boolean(
        'log_response', False,
        'Log responses.')
    flags.DEFINE_boolean(
        'log_request_response', False,
        'Log requests and responses.')
    flags.DEFINE_enum(
        'output_format',
        'protorpc',
        _OUTPUT_FORMATTER_MAP.keys(),
        'Display format for results.')
    _BASE_FLAGS_DECLARED = True
FLAGS = flags.FLAGS
def SetupLogger():
    """Enable INFO-level logging when any request/response logging flag is set."""
    wants_logging = (FLAGS.log_request or FLAGS.log_response or
                     FLAGS.log_request_response)
    if not wants_logging:
        return
    logging.basicConfig()
    logging.getLogger().setLevel(logging.INFO)
def FormatOutput(message, output_format=None):
    """Convert the output to the user-specified format.

    Args:
      message: message to format.
      output_format: optional key into _OUTPUT_FORMATTER_MAP; defaults to
        the --output_format flag when not given.

    Returns:
      The formatted message.

    Raises:
      exceptions.UserError: if the format name is unknown.
    """
    output_format = output_format or FLAGS.output_format
    # Bug fix: look up (and report) the effective output_format rather than
    # always reading FLAGS.output_format, so an explicit argument wins over
    # the flag instead of being silently ignored.
    formatter = _OUTPUT_FORMATTER_MAP.get(output_format)
    if formatter is None:
        raise exceptions.UserError('Unknown output format: %s' % output_format)
    return formatter(message)
class _SmartCompleter(rlcompleter.Completer):
    """rlcompleter.Completer that appends '(' after callables and inserts
    indentation (instead of listing all names) on an empty line."""

    def _callable_postfix(self, val, word):
        # Skip the '(' suffix if the line already contains one (likely
        # completing an argument) or the completed value isn't callable.
        if ('(' in readline.get_line_buffer() or
            not callable(val)):
            return word
        else:
            return word + '('

    def complete(self, text, state):
        # On a blank line, return a space once so Tab indents rather than
        # dumping every known completion.
        if not readline.get_line_buffer().strip():
            if not state:
                return ' '
            else:
                return None
        return rlcompleter.Completer.complete(self, text, state)
class ConsoleWithReadline(code.InteractiveConsole):
    """InteractiveConsole with readline, tab completion, and history.

    `env` seeds the interactive namespace; `histfile`, when given, is loaded
    at startup and written back at interpreter exit.
    """

    def __init__(self, env, filename='<console>', histfile=None):
        new_locals = dict(env)
        # Expose the completion machinery inside the console itself.
        new_locals.update({
            '_SmartCompleter': _SmartCompleter,
            'readline': readline,
            'rlcompleter': rlcompleter,
        })
        code.InteractiveConsole.__init__(self, new_locals, filename)
        readline.parse_and_bind('tab: complete')
        readline.set_completer(_SmartCompleter(new_locals).complete)
        if histfile is not None:
            histfile = os.path.expanduser(histfile)
            if os.path.exists(histfile):
                readline.read_history_file(histfile)
            # Persist this session's history when the process exits.
            atexit.register(lambda: readline.write_history_file(histfile))
def run_main():  # pylint: disable=invalid-name
    """Function to be used as setuptools script entry point.

    Appcommands assumes that it always runs as __main__, but launching
    via a setuptools-generated entry_point breaks this rule. We do some
    trickery here to make sure that appcommands and flags find their
    state where they expect to by faking ourselves as __main__.
    """
    # Put the flags for this module somewhere the flags module will look
    # for them.
    # pylint: disable=protected-access
    new_name = flags._GetMainModule()
    sys.modules[new_name] = sys.modules['__main__']
    # Re-register this module's flags (and key flags) under the fake
    # main-module name so gflags attributes them correctly.
    for flag in FLAGS.FlagsByModuleDict().get(__name__, []):
        FLAGS._RegisterFlagByModule(new_name, flag)
    for key_flag in FLAGS.KeyFlagsByModuleDict().get(__name__, []):
        FLAGS._RegisterKeyFlagForModule(new_name, key_flag)
    # pylint: enable=protected-access

    # Now set __main__ appropriately so that appcommands will be
    # happy.
    sys.modules['__main__'] = sys.modules[__name__]
    appcommands.Run()
    # Restore the original __main__ once appcommands returns.
    sys.modules['__main__'] = sys.modules.pop(new_name)
if __name__ == '__main__':
appcommands.Run()
| apache-2.0 |
OpenChemistry/tomviz | acquisition/tests/mock/tiltseries/__init__.py | 1 | 2016 | from threading import Thread
import datetime
import time
import os
from PIL import Image
from .. import test_image, test_dm3_tilt_series
class TIFFWriter(Thread):
    def __init__(self, path, delay=1):
        """
        Thread to write a TIFF image stack to a particular path. The
        files are written using the following naming convention
        <timestamp>_<tilt_angle>.tif
        :param path: The path to write the images to.
        :type path: str
        :param delay: The time in seconds to wait between writing images.
        :type delay: int
        """
        super(TIFFWriter, self).__init__()
        # Daemon thread so the process can exit even if the writer is running.
        self.daemon = True
        self._path = path
        self._delay = delay
        self.img = Image.open(test_image())
        # One frame per tilt image in the multi-page test TIFF.
        self.series_size = self.img.n_frames

    def run(self):
        """Write one frame per tilt angle (-73..+73 in 2-degree steps)."""
        for index, angle in enumerate(range(-73, 74, 2)):
            self.img.seek(index)
            timestamp = datetime.datetime.now().strftime('%m_%d_%y_%H_%M_%S')
            # %+d keeps an explicit sign on the angle, e.g. "..._+3.tif".
            filename = '%s_%+d.tif' % (timestamp, angle)
            file_path = os.path.join(self._path, filename)
            with open(file_path, 'wb') as fp:
                self.img.save(fp, 'TIFF')
            time.sleep(self._delay)
class DM3Writer(Thread):
    def __init__(self, path, delay=1):
        """
        Thread to write a DM3 tilt series to a particular path.
        :param path: The path to write the images to.
        :type path: str
        :param delay: The time in seconds to wait between writing images.
        :type delay: int
        """
        super(DM3Writer, self).__init__()
        # Daemon thread so the process can exit even if the writer is running.
        self.daemon = True
        self._path = path
        self._delay = delay
        # List of (filename, file-like) pairs supplied by the test fixture.
        self._files = list(test_dm3_tilt_series())
        self.series_size = len(self._files)

    def run(self):
        """Copy each fixture DM3 file into the target path, pausing between writes."""
        for (filename, dm3fp) in self._files:
            file_path = os.path.join(self._path, filename)
            with open(file_path, 'wb') as fp:
                fp.write(dm3fp.read())
            time.sleep(self._delay)
| bsd-3-clause |
cubaeurokatmic/gui | lib/python/Tools/CIHelper.py | 18 | 5416 | from xml.etree.cElementTree import parse
from Tools.XMLTools import elementsWithTag, mergeText, stringToXML
from enigma import eDVBCIInterfaces, eDVBCI_UI, eEnv, eServiceCenter, eServiceReference
from timer import TimerEntry
import NavigationInstance
import os
class CIHelper:
    """Enigma2 (Python 2) helper that maps CI (Common Interface) slots to the
    services/providers they are assigned to descramble, and answers whether a
    service can be played/recorded given the current assignments."""

    # Lazily-populated caches; None means "not parsed yet".
    CI_ASSIGNMENT_LIST = None
    CI_ASSIGNMENT_SERVICES_LIST = None
    CI_MULTIDESCRAMBLE = None
    # CAM application names known to descramble several services at once.
    CI_MULTIDESCRAMBLE_MODULES = ("AlphaCrypt", )

    def parse_ci_assignment(self):
        """Parse /etc/enigma2/ci<N>.xml for every CI slot and push the
        resulting descramble rules into the driver."""
        NUM_CI=eDVBCIInterfaces.getInstance().getNumOfSlots()
        if NUM_CI > 0:
            self.CI_ASSIGNMENT_LIST=[]
            def getValue(definitions, default):
                # Return the text of the last matching XML element, or default.
                ret = ""
                Len = len(definitions)
                return Len > 0 and definitions[Len-1].text or default
            for ci in range(NUM_CI):
                filename = eEnv.resolve("${sysconfdir}/enigma2/ci") + str(ci) + ".xml"
                if not os.path.exists(filename):
                    continue
                try:
                    tree = parse(filename).getroot()
                    read_services=[]
                    read_providers=[]
                    usingcaid=[]
                    for slot in tree.findall("slot"):
                        read_slot = getValue(slot.findall("id"), False).encode("UTF-8")
                        for caid in slot.findall("caid"):
                            read_caid = caid.get("id").encode("UTF-8")
                            usingcaid.append(long(read_caid,16))
                        for service in slot.findall("service"):
                            read_service_ref = service.get("ref").encode("UTF-8")
                            read_services.append (read_service_ref)
                        for provider in slot.findall("provider"):
                            read_provider_name = provider.get("name").encode("UTF-8")
                            read_provider_dvbname = provider.get("dvbnamespace").encode("UTF-8")
                            read_providers.append((read_provider_name,long(read_provider_dvbname,16)))
                        self.CI_ASSIGNMENT_LIST.append((int(read_slot), (read_services, read_providers, usingcaid)))
                except:
                    # NOTE(review): bare except hides parse errors by design
                    # (a broken config file must not break the box).
                    print "[CI_ASSIGNMENT %d] error parsing xml..." % ci
            services = []
            providers = []
            for item in self.CI_ASSIGNMENT_LIST:
                print "[CI_Activate] activate CI%d with following settings:" % item[0]
                print item[1]
                try:
                    eDVBCIInterfaces.getInstance().setDescrambleRules(item[0],item[1])
                except:
                    print "[CI_Activate_Config_CI%d] error setting DescrambleRules..." %item[0]
                for x in item[1][0]:
                    services.append(x)
                for x in item[1][1]:
                    providers.append(x[0])
            service_refs = []
            if len(services):
                for x in services:
                    service_refs.append(eServiceReference(x))
            provider_services_refs = []
            if len(providers):
                provider_services_refs = self.getProivderServices(providers)
            # [explicitly assigned services, services implied by providers]
            self.CI_ASSIGNMENT_SERVICES_LIST = [service_refs, provider_services_refs]

    def load_ci_assignment(self, force = False):
        # Parse only once unless force=True.
        if self.CI_ASSIGNMENT_LIST is None or force:
            self.parse_ci_assignment()

    def getProivderServices(self, providers):
        # NOTE(review): method name is misspelled ("Proivder") but kept for
        # compatibility with external callers.
        provider_services_refs = []
        if len(providers):
            serviceHandler = eServiceCenter.getInstance()
            for x in providers:
                # Query all playable service types of this provider.
                refstr = '1:7:0:0:0:0:0:0:0:0:(provider == "%s") && (type == 1) || (type == 17) || (type == 22) || (type == 25) || (type == 31) || (type == 134) || (type == 195) ORDER BY name:%s' % (x,x)
                myref = eServiceReference(refstr)
                servicelist = serviceHandler.list(myref)
                if not servicelist is None:
                    while True:
                        service = servicelist.getNext()
                        if not service.valid():
                            break
                        provider_services_refs.append(service)
        return provider_services_refs

    def ServiceIsAssigned(self, ref):
        """True when `ref` is covered by any CI assignment (directly or via provider)."""
        self.load_ci_assignment()
        if self.CI_ASSIGNMENT_SERVICES_LIST:
            for x in self.CI_ASSIGNMENT_SERVICES_LIST:
                if len(x) and ref in x:
                    return True
        return False

    def canMultiDescramble(self, ref):
        """True when the CAM assigned to `ref` can descramble multiple services."""
        if self.CI_MULTIDESCRAMBLE is None:
            # First call: probe every slot's CAM application name once.
            no_ci = eDVBCIInterfaces.getInstance().getNumOfSlots()
            if no_ci > 0:
                self.CI_MULTIDESCRAMBLE = False
                for ci in range(no_ci):
                    appname = eDVBCI_UI.getInstance().getAppName(ci)
                    if appname in self.CI_MULTIDESCRAMBLE_MODULES:
                        self.CI_MULTIDESCRAMBLE = True
        elif self.CI_MULTIDESCRAMBLE == False:
            # Known answer: no slot has a multi-descramble CAM.
            return False
        if self.CI_ASSIGNMENT_LIST is not None and len(self.CI_ASSIGNMENT_LIST):
            # Check direct service assignments first...
            for x in self.CI_ASSIGNMENT_LIST:
                if ref.toString() in x[1][0]:
                    appname = eDVBCI_UI.getInstance().getAppName(x[0])
                    if appname in self.CI_MULTIDESCRAMBLE_MODULES:
                        return True
            # ...then provider-based assignments.
            for x in self.CI_ASSIGNMENT_LIST:
                f_providers = x[1][1]
                if len(f_providers):
                    providers = []
                    for prov in f_providers:
                        providers.append(prov[0])
                    provider_services_refs = self.getProivderServices(providers)
                    if ref in provider_services_refs:
                        appname = eDVBCI_UI.getInstance().getAppName(x[0])
                        if appname in self.CI_MULTIDESCRAMBLE_MODULES:
                            return True
        return False

    def isPlayable(self, service):
        """Return 1 when `service` can be played now, 0 when the CI is busy
        descrambling a running recording it cannot share."""
        service = eServiceReference(service)
        if NavigationInstance.instance.getRecordings():
            if self.ServiceIsAssigned(service):
                for timer in NavigationInstance.instance.RecordTimer.timer_list:
                    if timer.state == TimerEntry.StateRunning:
                        if timer.justplay:
                            pass
                        else:
                            timerservice = timer.service_ref.ref
                            if timerservice != service:
                                if self.ServiceIsAssigned(timerservice):
                                    if self.canMultiDescramble(service):
                                        # Multi-descramble CAM: only services on the
                                        # same transponder (data words 4,2,3) work.
                                        for x in (4, 2, 3):
                                            if timerservice.getUnsignedData(x) != service.getUnsignedData(x):
                                                return 0
                                    else:
                                        return 0
        return 1
cihelper = CIHelper()
def isPlayable(service):
    """Module-level convenience wrapper around the shared CIHelper instance."""
    return cihelper.isPlayable(service)
| gpl-2.0 |
olifre/root | tutorials/pyroot/staff.py | 23 | 2311 | ## \file
## \ingroup tutorial_pyroot
## \notebook -nodraw
## example of macro to read data from an ascii file and
## create a root file with a Tree.
##
## NOTE: comparing the results of this macro with those of staff.C, you'll
## notice that the resultant file is a couple of bytes smaller, because the
## code below strips all white-spaces, whereas the .C version does not.
##
## \macro_code
##
## \author Wim Lavrijsen
import re, array, os
import ROOT
from ROOT import TFile, TTree, gROOT, AddressOf
## A C/C++ structure is required, to allow memory based access
gROOT.ProcessLine(
"struct staff_t {\
Int_t Category;\
UInt_t Flag;\
Int_t Age;\
Int_t Service;\
Int_t Children;\
Int_t Grade;\
Int_t Step;\
Int_t Hrweek;\
Int_t Cost;\
Char_t Division[4];\
Char_t Nation[3];\
};" );
## Function to read in data from ASCII file and fill the ROOT tree
def staff():
    """Read the 1988 CERN staff ASCII data and write it to staff.root as a TTree.

    Creates (and overwrites) ``staff.root`` in the current directory with a
    tree ``T`` filled from ``cernstaff.dat`` in the ROOT tutorial directory.
    """
    staff = ROOT.staff_t()

    # The input file cern.dat is a copy of the CERN staff data base
    # from 1988
    f = TFile('staff.root', 'RECREATE')
    tree = TTree('T', 'staff data from ascii file')
    tree.Branch('staff', staff, 'Category/I:Flag:Age:Service:Children:Grade:Step:Hrweek:Cost')
    tree.Branch('Divisions', AddressOf(staff, 'Division'), 'Division/C')
    tree.Branch('Nation', AddressOf(staff, 'Nation'), 'Nation/C')
    # note that the branches Division and Nation cannot be on the first branch

    fname = os.path.join(str(ROOT.gROOT.GetTutorialDir()), 'tree', 'cernstaff.dat')
    # Iterate the file lazily inside a context manager instead of
    # open(...).readlines(): the handle is closed deterministically and the
    # whole file is not held in memory.
    with open(fname) as datafile:
        for line in datafile:
            # str.split() with no argument splits on runs of whitespace and
            # drops empty fields -- equivalent to the previous
            # filter(None, re.split(r'\s+', line)) but simpler.
            t = line.split()
            if not t:
                continue  # skip blank lines defensively
            staff.Category = int(t[0])     # assign as integers
            staff.Flag = int(t[1])
            staff.Age = int(t[2])
            staff.Service = int(t[3])
            staff.Children = int(t[4])
            staff.Grade = int(t[5])
            staff.Step = int(t[6])
            staff.Hrweek = int(t[7])
            staff.Cost = int(t[8])
            staff.Division = t[9]          # assign as strings
            staff.Nation = t[10]
            tree.Fill()

    tree.Print()
    tree.Write()
#### run fill function if invoked on CLI
if __name__ == '__main__':
    staff()  # builds staff.root when the script is executed directly
| lgpl-2.1 |
stahlfabrik/RibbaPi | animation/moodlight.py | 1 | 5309 | #!/usr/bin/env python3
# RibbaPi - APA102 LED matrix controlled by Raspberry Pi in python
# Copyright (C) 2016 Christoph Stahl
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import time
import numpy as np
import random
import colorsys
from animation.abstract_animation import AbstractAnimation
class MoodlightAnimation(AbstractAnimation):
    """Ambient "moodlight" animation producing full-matrix color frames.

    Frames are pushed into ``frame_queue`` at ``self.frequency`` frames per
    second. Three modes are supported: ``colorwheel`` (whole matrix cycles
    through the HSV hue wheel), ``cyclecolors`` (random pixels step through
    ``self.colors``) and ``wish_down_up`` (a color wave scrolling upwards).
    """

    def __init__(self, width, height, frame_queue, repeat=False,
                 mode="wish_down_up"):
        super().__init__(width, height, frame_queue, repeat)
        self.mode = mode
        # Palette for "cyclecolors" mode; if empty choose random colors.
        self.colors = [(255, 0, 0), (255, 255, 0), (0, 255, 255), (0, 0, 255)]
        self.random = False  # how to step through colors
        self.hold = 10  # seconds to hold colors
        self.transition_duration = 10  # seconds to change from one to other
        # NOTE(review): random/hold/transition_duration are not yet wired into
        # the generators below -- they currently use literal step counts.
        self.frequency = 60  # frames per second
        print("MoodlightAnimation created")

    def ribbapi_hsv_to_rgb(self, h, s, v):
        """Convert HSV (h in degrees, s and v in percent) to 0-255 RGB."""
        h %= 360
        h /= 360
        s /= 100
        v /= 100
        r, g, b = colorsys.hsv_to_rgb(h, s, v)
        return (int(r * 255), int(g * 255), int(b * 255))

    def ribbapi_rgb_to_hsv(self, r, g, b):
        """Convert 0-255 RGB to HSV with h in degrees, s and v in percent."""
        r /= 255
        g /= 255
        b /= 255
        h, s, v = colorsys.rgb_to_hsv(r, g, b)
        return (h * 360, s * 100, v * 100)

    def color_wheel_generator(self, steps):
        """Yield fully saturated RGB colors around the hue wheel, forever.

        steps: how many steps to take to go from 0 to 360 degrees.
        """
        increase = (360 - 0) / steps
        while True:
            for i in np.arange(0, 360, increase):
                color = self.ribbapi_hsv_to_rgb(i, 100, 100)
                yield color

    def cycle_selected_colors_generator(self, steps, hold):
        """Yield colors that fade between the entries of ``self.colors``.

        steps: how many interpolation steps from one color to the next.
        hold:  how many iterations to stay at each reached color.
        """
        current_color = None
        while True:
            for color in self.colors:
                if not current_color:
                    # First iteration: emit the starting color as-is.
                    current_color = color
                    yield color
                else:
                    # Linear per-channel interpolation toward the next color.
                    r, g, b = color
                    current_r, current_g, current_b = current_color
                    increase_r = (r - current_r) / steps
                    increase_g = (g - current_g) / steps
                    increase_b = (b - current_b) / steps
                    for _ in range(steps):
                        current_r += increase_r
                        current_g += increase_g
                        current_b += increase_b
                        current_color = (current_r, current_g, current_b)
                        color = (int(current_r), int(current_g), int(current_b))
                        yield color
                # Dwell on the reached color before moving on.
                for _ in range(hold):
                    yield color

    def frame_generator(self, color_mode, style):
        """Yield (height, width, 3) uint8 frames forever.

        color_mode selects the color source ("colorwheel"/"cyclecolors"),
        style selects how colors are painted onto the frame.
        """
        frame = np.zeros((self.height, self.width, 3), dtype=np.uint8)
        if color_mode == "colorwheel":
            colors = self.color_wheel_generator(500)
        elif color_mode == "cyclecolors":
            colors = self.cycle_selected_colors_generator(5, 100)
        while True:
            if style == "fill":
                frame[:, :] = next(colors)
                yield frame
            elif style == "random_dot":
                y = np.random.randint(0, self.height)
                x = np.random.randint(0, self.width)
                frame[y, x] = next(colors)
                yield frame
            elif style == "wish_down_up":
                color = next(colors)
                # BUGFIX: the scroll window was hard-coded as frame[1:16, :],
                # which only worked for a 16-pixel-high matrix. frame[1:]
                # drops the oldest row for any height.
                frame = np.concatenate(
                    (frame[1:],
                     np.array(color * self.width).reshape(1, self.width, 3)),
                    axis=0)
                yield frame

    def animate(self):
        """Main loop: pump frames into the queue until stopped."""
        while self._running:
            if self.mode == "colorwheel":
                generator = self.frame_generator("colorwheel", "fill")
            elif self.mode == "cyclecolors":
                generator = self.frame_generator("cyclecolors", "random_dot")
            elif self.mode == "wish_down_up":
                generator = self.frame_generator("colorwheel", "wish_down_up")
            for frame in generator:
                if self._running:
                    # Copy so later in-place frame updates don't mutate
                    # frames already handed to the consumer.
                    self.frame_queue.put(frame.copy())
                else:
                    break
                time.sleep(1 / self.frequency)
            # if self.repeat > 0:
            #     self.repeat -= 1
            # elif self.repeat == 0:
            #     self._running = False

    @property
    def kwargs(self):
        """Constructor arguments needed to recreate this animation."""
        return {"width": self.width, "height": self.height,
                "frame_queue": self.frame_queue, "repeat": self.repeat}
| gpl-3.0 |
blueburningcoder/pybrain | examples/rl/environments/flexcube/flexcube_pgpe.py | 30 | 2997 | #!/usr/bin/env python
#########################################################################
# Reinforcement Learning with PGPE on the FlexCube Environment
#
# The FlexCube Environment is a Mass-Spring-System composed of 8 mass points.
# These resemble a cube with flexible edges.
#
# Control/Actions:
# The agent can control the 12 equilibrium edge lengths.
#
# A wide variety of sensors are available for observation and reward:
# - 12 edge lengths
# - 12 wanted edge lengths (the last action)
# - vertexes contact with floor
# - vertexes min height (distance of closest vertex to the floor)
# - distance to origin
# - distance and angle to target
#
# Task available are:
# - GrowTask, agent has to maximize the volume of the cube
# - JumpTask, agent has to maximize the distance of the lowest mass point during the episode
# - WalkTask, agent has to maximize the distance to the starting point
# - WalkDirectionTask, agent has to minimize the distance to a target point.
# - TargetTask, like the previous task but with several target points
#
# Requirements: pylab (for plotting only). If not available, comment the
# last 3 lines out
# Author: Frank Sehnke, sehnke@in.tum.de
#########################################################################
__author__ = "Frank Sehnke"
__version__ = '$Id$'
from pybrain.tools.example_tools import ExTools
from pybrain.structure.modules.tanhlayer import TanhLayer
from pybrain.tools.shortcuts import buildNetwork
from pybrain.rl.environments.flexcube import FlexCubeEnvironment, WalkTask
from pybrain.rl.agents import OptimizationAgent
from pybrain.optimization import PGPE
from pybrain.rl.experiments import EpisodicExperiment
# --- experiment hyper-parameters -------------------------------------------
hiddenUnits = 4
batch = 1   # number of samples per learning step
prnts = 1   # number of learning steps after results are printed
# Floor division keeps `epis` an int on Python 3 as well; a plain `/` would
# produce a float there and break range(epis) below. Result is unchanged on
# Python 2 for these integer operands.
epis = 5000000 // batch // prnts  # number of roleouts
numbExp = 10  # number of experiments
et = ExTools(batch, prnts)  # tool for printing and plotting

env = None
for runs in range(numbExp):
    # create environment
    # Options: Bool(OpenGL), Bool(Realtime simu. while client is connected), ServerIP(default:localhost), Port(default:21560)
    if env is not None:
        env.closeSocket()  # release the previous run's simulation socket
    env = FlexCubeEnvironment()
    # create task
    task = WalkTask(env)
    # create controller network
    net = buildNetwork(len(task.getObservation()), hiddenUnits, env.actLen, outclass=TanhLayer)
    # create agent with controller and learner (and its options)
    agent = OptimizationAgent(net, PGPE(storeAllEvaluations=True))
    et.agent = agent
    # create the experiment
    experiment = EpisodicExperiment(task, agent)
    # Do the experiment
    for updates in range(epis):
        for i in range(prnts):
            experiment.doEpisodes(batch)
        # Report over the most recent evaluations of this run.
        et.printResults((agent.learner._allEvaluations)[-50:-1], runs, updates)
    et.addExps()
et.showExps()
#To view what the simulation is doing at the moment, go to pybrain/rl/environments/flexcube/ and start renderer.py (python-openGL musst be installed)
| bsd-3-clause |
fzheng/codejam | lib/python2.7/site-packages/tornado/queues.py | 78 | 10013 | # Copyright 2015 The Tornado Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, with_statement
__all__ = ['Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty']
import collections
import heapq
from tornado import gen, ioloop
from tornado.concurrent import Future
from tornado.locks import Event
# Failure signal for the non-blocking consumer API; never raised by the
# Future-returning get().
class QueueEmpty(Exception):
    """Raised by `.Queue.get_nowait` when the queue has no items."""
    pass
# Failure signal for the non-blocking producer API; never raised by the
# Future-returning put().
class QueueFull(Exception):
    """Raised by `.Queue.put_nowait` when a queue is at its maximum size."""
    pass
def _set_timeout(future, timeout):
    """Arrange for *future* to fail with `gen.TimeoutError` at *timeout*.

    Does nothing when *timeout* is falsy. The scheduled timeout is removed
    from the IOLoop as soon as the future completes for any reason.
    """
    if not timeout:
        return

    def _expire():
        future.set_exception(gen.TimeoutError())

    loop = ioloop.IOLoop.current()
    handle = loop.add_timeout(timeout, _expire)
    future.add_done_callback(lambda _: loop.remove_timeout(handle))
class _QueueIterator(object):
    """Async-iterator adapter returned by `Queue.__aiter__`."""

    def __init__(self, q):
        self.q = q

    def __anext__(self):
        # Each step of ``async for`` awaits the next queued item.
        return self.q.get()
class Queue(object):
    """Coordinate producer and consumer coroutines.

    If maxsize is 0 (the default) the queue size is unbounded.

    .. testcode::

        from tornado import gen
        from tornado.ioloop import IOLoop
        from tornado.queues import Queue

        q = Queue(maxsize=2)

        @gen.coroutine
        def consumer():
            while True:
                item = yield q.get()
                try:
                    print('Doing work on %s' % item)
                    yield gen.sleep(0.01)
                finally:
                    q.task_done()

        @gen.coroutine
        def producer():
            for item in range(5):
                yield q.put(item)
                print('Put %s' % item)

        @gen.coroutine
        def main():
            # Start consumer without waiting (since it never finishes).
            IOLoop.current().spawn_callback(consumer)
            yield producer()     # Wait for producer to put all tasks.
            yield q.join()       # Wait for consumer to finish all tasks.
            print('Done')

        IOLoop.current().run_sync(main)

    .. testoutput::

        Put 0
        Put 1
        Doing work on 0
        Put 2
        Doing work on 1
        Put 3
        Doing work on 2
        Put 4
        Doing work on 3
        Doing work on 4
        Done

    In Python 3.5, `Queue` implements the async iterator protocol, so
    ``consumer()`` could be rewritten as::

        async def consumer():
            async for item in q:
                try:
                    print('Doing work on %s' % item)
                    yield gen.sleep(0.01)
                finally:
                    q.task_done()

    .. versionchanged:: 4.3
       Added ``async for`` support in Python 3.5.
    """
    def __init__(self, maxsize=0):
        if maxsize is None:
            raise TypeError("maxsize can't be None")
        if maxsize < 0:
            raise ValueError("maxsize can't be negative")
        self._maxsize = maxsize
        self._init()  # subclass hook: creates the underlying storage
        # Invariant kept by put_nowait/get_nowait: _getters and _putters are
        # never both non-empty (asserted below).
        self._getters = collections.deque([])  # Futures.
        self._putters = collections.deque([])  # Pairs of (item, Future).
        self._unfinished_tasks = 0
        self._finished = Event()
        self._finished.set()

    @property
    def maxsize(self):
        """Number of items allowed in the queue."""
        return self._maxsize

    def qsize(self):
        """Number of items in the queue."""
        return len(self._queue)

    def empty(self):
        return not self._queue

    def full(self):
        if self.maxsize == 0:
            # maxsize 0 means unbounded, so the queue is never full.
            return False
        else:
            return self.qsize() >= self.maxsize

    def put(self, item, timeout=None):
        """Put an item into the queue, perhaps waiting until there is room.

        Returns a Future, which raises `tornado.gen.TimeoutError` after a
        timeout.
        """
        try:
            self.put_nowait(item)
        except QueueFull:
            # No room: park the item with a pending Future until a consumer
            # frees a slot (or the optional timeout fires).
            future = Future()
            self._putters.append((item, future))
            _set_timeout(future, timeout)
            return future
        else:
            # Stored immediately; return an already-resolved Future.
            return gen._null_future

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        If no free slot is immediately available, raise `QueueFull`.
        """
        self._consume_expired()
        if self._getters:
            # A consumer is already waiting: hand the item straight through
            # (still routed via __put_internal to keep task accounting and
            # subclass ordering correct).
            assert self.empty(), "queue non-empty, why are getters waiting?"
            getter = self._getters.popleft()
            self.__put_internal(item)
            getter.set_result(self._get())
        elif self.full():
            raise QueueFull
        else:
            self.__put_internal(item)

    def get(self, timeout=None):
        """Remove and return an item from the queue.

        Returns a Future which resolves once an item is available, or raises
        `tornado.gen.TimeoutError` after a timeout.
        """
        future = Future()
        try:
            future.set_result(self.get_nowait())
        except QueueEmpty:
            # Nothing queued yet: wait for a producer.
            self._getters.append(future)
            _set_timeout(future, timeout)
        return future

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Return an item if one is immediately available, else raise
        `QueueEmpty`.
        """
        self._consume_expired()
        if self._putters:
            # A producer is blocked waiting for room: accept its item now,
            # resolve its Future, and return the front of the queue.
            assert self.full(), "queue not full, why are putters waiting?"
            item, putter = self._putters.popleft()
            self.__put_internal(item)
            putter.set_result(None)
            return self._get()
        elif self.qsize():
            return self._get()
        else:
            raise QueueEmpty

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by queue consumers. For each `.get` used to fetch a task, a
        subsequent call to `.task_done` tells the queue that the processing
        on the task is complete.

        If a `.join` is blocking, it resumes when all items have been
        processed; that is, when every `.put` is matched by a `.task_done`.

        Raises `ValueError` if called more times than `.put`.
        """
        if self._unfinished_tasks <= 0:
            raise ValueError('task_done() called too many times')
        self._unfinished_tasks -= 1
        if self._unfinished_tasks == 0:
            self._finished.set()  # wakes every coroutine blocked in join()

    def join(self, timeout=None):
        """Block until all items in the queue are processed.

        Returns a Future, which raises `tornado.gen.TimeoutError` after a
        timeout.
        """
        return self._finished.wait(timeout)

    @gen.coroutine
    def __aiter__(self):
        return _QueueIterator(self)

    # These three are overridable in subclasses.
    def _init(self):
        self._queue = collections.deque()

    def _get(self):
        return self._queue.popleft()

    def _put(self, item):
        self._queue.append(item)
    # End of the overridable methods.

    def __put_internal(self, item):
        # Every stored item counts as an unfinished task until task_done().
        self._unfinished_tasks += 1
        self._finished.clear()
        self._put(item)

    def _consume_expired(self):
        # Remove timed-out waiters. A done Future here means its timeout
        # already fired (or it was otherwise resolved), so it must not be
        # handed an item/slot.
        while self._putters and self._putters[0][1].done():
            self._putters.popleft()

        while self._getters and self._getters[0].done():
            self._getters.popleft()

    def __repr__(self):
        return '<%s at %s %s>' % (
            type(self).__name__, hex(id(self)), self._format())

    def __str__(self):
        return '<%s %s>' % (type(self).__name__, self._format())

    def _format(self):
        # Human-readable summary used by __repr__/__str__.
        result = 'maxsize=%r' % (self.maxsize, )
        if getattr(self, '_queue', None):
            result += ' queue=%r' % self._queue
        if self._getters:
            result += ' getters[%s]' % len(self._getters)
        if self._putters:
            result += ' putters[%s]' % len(self._putters)
        if self._unfinished_tasks:
            result += ' tasks=%s' % self._unfinished_tasks
        return result
class PriorityQueue(Queue):
    """A `.Queue` that retrieves entries in priority order, lowest first.

    Entries are typically tuples like ``(priority number, data)``.

    .. testcode::

        from tornado.queues import PriorityQueue

        q = PriorityQueue()
        q.put((1, 'medium-priority item'))
        q.put((0, 'high-priority item'))
        q.put((10, 'low-priority item'))

        print(q.get_nowait())
        print(q.get_nowait())
        print(q.get_nowait())

    .. testoutput::

        (0, 'high-priority item')
        (1, 'medium-priority item')
        (10, 'low-priority item')
    """
    # Storage is a binary min-heap maintained by heapq, so _get always
    # returns the smallest entry.
    def _init(self):
        self._queue = []

    def _put(self, item):
        heapq.heappush(self._queue, item)

    def _get(self):
        return heapq.heappop(self._queue)
class LifoQueue(Queue):
    """A `.Queue` that retrieves the most recently put items first.

    .. testcode::

        from tornado.queues import LifoQueue

        q = LifoQueue()
        q.put(3)
        q.put(2)
        q.put(1)

        print(q.get_nowait())
        print(q.get_nowait())
        print(q.get_nowait())

    .. testoutput::

        1
        2
        3
    """
    # A plain list used as a stack: append/pop at the end gives LIFO order.
    def _init(self):
        self._queue = []

    def _put(self, item):
        self._queue.append(item)

    def _get(self):
        return self._queue.pop()
| mit |
rohit12/atomspace | tests/pln/test_rules_old.py | 38 | 2817 | import unittest
# Skips this whole module at import time; the tests below target an old
# rules API. See the linked issue before re-enabling.
raise unittest.SkipTest("Unit test temporarily disabled - see: https://github.com/opencog/opencog/issues/442")
from unittest import TestCase
from opencog.atomspace import AtomSpace, types, Atom, TruthValue
from pln.rules import *
from pln.chainers import Chainer
class RulesTest(TestCase):
    """Smoke tests for PLN inference rules driven through a Chainer.

    NOTE(review): written for Python 2 (``print`` statements); results are
    printed rather than asserted, so these only check that rule application
    does not raise.
    """

    def setUp(self):
        # A fresh AtomSpace and Chainer per test keeps tests independent.
        self.atomspace = AtomSpace()
        self.chainer = Chainer(self.atomspace)

    def tearDown(self):
        del self.atomspace
        del self.chainer

    def _inh_animal_breathe(self):
        '''InheritanceLink animal breathe'''
        # Builds the minimal knowledge base "animal inherits from breathe",
        # giving every atom a small attention value so the chainer will
        # consider it.
        default_av = {'sti':1}
        self.animal = self.atomspace.add_node(types.ConceptNode, "animal")
        self.breathe = self.atomspace.add_node(types.ConceptNode, "breathe")
        self.inh_animal_breathe = self.atomspace.add_link(types.InheritanceLink, [self.animal, self.breathe])
        self.animal.tv = TruthValue(0.1, 1)
        self.breathe.tv = TruthValue(0.1, 1)
        self.inh_animal_breathe.tv = TruthValue(1, 1)

        atoms = []
        atoms.append( self.animal )
        atoms.append( self.breathe )
        atoms.append( self.inh_animal_breathe )

        for atom in atoms:
            atom.av = default_av

        return atoms

#    def _apply_rule(self, rule, 

    def test_standardize_apart_input_output(self):
        rule = InversionRule(self.chainer, types.InheritanceLink)

        (input, output) = rule.standardize_apart_input_output(self.chainer)

    def test_InversionRule(self):
        # Forward application of InversionRule over the tiny KB; the result
        # is only printed, not asserted.
        rule = InversionRule(self.chainer, types.InheritanceLink)

        self._inh_animal_breathe()

        result = self.chainer._apply_forward(rule)
        print result

    def test_InversionRule_backward(self):
        # Backward application: the target (breathe -> animal) is added
        # explicitly and given attention first.
        rule = InversionRule(self.chainer, types.InheritanceLink)

        self._inh_animal_breathe()
        self.inh_breathe_animal = self.atomspace.add_link(types.InheritanceLink, [self.breathe, self.animal])
        self.inh_breathe_animal.av = {'sti':1}

        result = self.chainer._apply_backward(rule)
        print result

    def disabled_test_rules_generically(self):
        '''See what happens if you give a rule the generic inputs. This makes sure that the rule and formula don't have any basic code errors, but doesn't check that they do the right thing.'''
        def apply_rule(rule):
            generic_inputs = rule.inputs
            # NOTE(review): 'outpus' is a typo for 'outputs'; the variable is
            # unused, so it is harmless but worth cleaning up.
            generic_outpus = rule.outputs

            # Take the generic required input atoms and give them boring TVs
            for atom in generic_inputs:
                atom.av = {'sti':1}
                atom.tv = TruthValue(1, 1)

            status = self.chainer._apply_forward(rule)

            self.assertNotEquals(status, None)

            return None

        for rule in self.chainer.rules:
            apply_rule(rule)
| agpl-3.0 |
musicrighter/CIS422-P2 | env/lib/python3.4/site-packages/pip/_vendor/html5lib/utils.py | 436 | 3267 | from __future__ import absolute_import, division, unicode_literals
from types import ModuleType
from pip._vendor.six import text_type
try:
import xml.etree.cElementTree as default_etree
except ImportError:
import xml.etree.ElementTree as default_etree
__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
"surrogatePairToCodepoint", "moduleFactoryFactory",
"supports_lone_surrogates"]
# Platforms not supporting lone surrogates (\uD800-\uDFFF) should be
# caught by the below test. In general this would be any platform
# using UTF-16 as its encoding of unicode strings, such as
# Jython. This is because UTF-16 itself is based on the use of such
# surrogates, and there is no mechanism to further escape such
# escapes.
# Import-time probe: can this interpreter represent a lone surrogate
# (U+D800) in a text string? The result is exported as
# supports_lone_surrogates for the rest of the package.
try:
    _x = eval('"\\uD800"')
    if not isinstance(_x, text_type):
        # We need this with u"" because of http://bugs.jython.org/issue2039
        _x = eval('u"\\uD800"')
        assert isinstance(_x, text_type)
except:
    # NOTE(review): deliberately broad -- the failing operation and the
    # exception type vary per interpreter; anything raised here means
    # lone surrogates are unsupported.
    supports_lone_surrogates = False
else:
    supports_lone_surrogates = True
class MethodDispatcher(dict):
    """Dict with 2 special properties:

    On initiation, keys that are lists, sets or tuples are converted to
    multiple keys so accessing any one of the items in the original
    list-like object returns the matching value

    md = MethodDispatcher({("foo", "bar"):"baz"})
    md["foo"] == "baz"

    A default value which can be set through the default attribute.
    """

    def __init__(self, items=()):
        # Flatten group keys into one (alias, value) pair per alias and hand
        # the whole list to dict.__init__ in a single call -- building the
        # list first is faster than inserting into self key by key.
        flattened = []
        for key, value in items:
            # Exact type check (not isinstance), as in the original: only
            # these four container types are treated as key groups.
            if type(key) in (list, tuple, frozenset, set):
                flattened.extend((alias, value) for alias in key)
            else:
                flattened.append((key, value))
        dict.__init__(self, flattened)
        self.default = None

    def __getitem__(self, key):
        # Missing keys resolve to self.default instead of raising KeyError.
        return dict.get(self, key, self.default)
# Some utility functions to dal with weirdness around UCS2 vs UCS4
# python builds
def isSurrogatePair(data):
    """Return True iff *data* is exactly a UTF-16 high+low surrogate pair."""
    if len(data) != 2:
        return False
    high = ord(data[0])
    low = ord(data[1])
    return 0xD800 <= high <= 0xDBFF and 0xDC00 <= low <= 0xDFFF
def surrogatePairToCodepoint(data):
    """Decode a two-character surrogate pair into its astral code point."""
    high = ord(data[0]) - 0xD800
    low = ord(data[1]) - 0xDC00
    # Equivalent to 0x10000 + high * 0x400 + low.
    return 0x10000 + (high << 10) + low
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
    """Wrap *factory* so the module objects it builds are cached by name.

    *factory* receives (baseModule, *args, **kwargs) and returns a dict of
    attributes; the wrapper materializes that dict as a ModuleType instance
    named "_<baseModule.__name__>_factory" and reuses it on later calls.
    """
    moduleCache = {}

    def moduleFactory(baseModule, *args, **kwargs):
        # str vs. bytes module names: CPython 3 uses text, but some
        # interpreters historically exposed bytes for ModuleType.__name__.
        if isinstance(ModuleType.__name__, type("")):
            name = "_%s_factory" % baseModule.__name__
        else:
            name = b"_%s_factory" % baseModule.__name__
        try:
            return moduleCache[name]
        except KeyError:
            mod = ModuleType(name)
            objs = factory(baseModule, *args, **kwargs)
            mod.__dict__.update(objs)
            moduleCache[name] = mod
            return mod

    return moduleFactory
| artistic-2.0 |
creativecommons/open-ledger | util/scheduled-snapshots/requests/packages/chardet/chardistribution.py | 2755 | 9226 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .euctwfreq import (EUCTWCharToFreqOrder, EUCTW_TABLE_SIZE,
EUCTW_TYPICAL_DISTRIBUTION_RATIO)
from .euckrfreq import (EUCKRCharToFreqOrder, EUCKR_TABLE_SIZE,
EUCKR_TYPICAL_DISTRIBUTION_RATIO)
from .gb2312freq import (GB2312CharToFreqOrder, GB2312_TABLE_SIZE,
GB2312_TYPICAL_DISTRIBUTION_RATIO)
from .big5freq import (Big5CharToFreqOrder, BIG5_TABLE_SIZE,
BIG5_TYPICAL_DISTRIBUTION_RATIO)
from .jisfreq import (JISCharToFreqOrder, JIS_TABLE_SIZE,
JIS_TYPICAL_DISTRIBUTION_RATIO)
from .compat import wrap_ord
ENOUGH_DATA_THRESHOLD = 1024
SURE_YES = 0.99
SURE_NO = 0.01
MINIMUM_DATA_THRESHOLD = 3
class CharDistributionAnalysis:
def __init__(self):
# Mapping table to get frequency order from char order (get from
# GetOrder())
self._mCharToFreqOrder = None
self._mTableSize = None # Size of above table
# This is a constant value which varies from language to language,
# used in calculating confidence. See
# http://www.mozilla.org/projects/intl/UniversalCharsetDetection.html
# for further detail.
self._mTypicalDistributionRatio = None
self.reset()
def reset(self):
"""reset analyser, clear any state"""
# If this flag is set to True, detection is done and conclusion has
# been made
self._mDone = False
self._mTotalChars = 0 # Total characters encountered
# The number of characters whose frequency order is less than 512
self._mFreqChars = 0
def feed(self, aBuf, aCharLen):
"""feed a character with known length"""
if aCharLen == 2:
# we only care about 2-bytes character in our distribution analysis
order = self.get_order(aBuf)
else:
order = -1
if order >= 0:
self._mTotalChars += 1
# order is valid
if order < self._mTableSize:
if 512 > self._mCharToFreqOrder[order]:
self._mFreqChars += 1
def get_confidence(self):
"""return confidence based on existing data"""
# if we didn't receive any character in our consideration range,
# return negative answer
if self._mTotalChars <= 0 or self._mFreqChars <= MINIMUM_DATA_THRESHOLD:
return SURE_NO
if self._mTotalChars != self._mFreqChars:
r = (self._mFreqChars / ((self._mTotalChars - self._mFreqChars)
* self._mTypicalDistributionRatio))
if r < SURE_YES:
return r
# normalize confidence (we don't want to be 100% sure)
return SURE_YES
def got_enough_data(self):
# It is not necessary to receive all data to draw conclusion.
# For charset detection, certain amount of data is enough
return self._mTotalChars > ENOUGH_DATA_THRESHOLD
def get_order(self, aBuf):
# We do not handle characters based on the original encoding string,
# but convert this encoding string to a number, here called order.
# This allows multiple encodings of a language to share one frequency
# table.
return -1
class EUCTWDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCTWCharToFreqOrder
self._mTableSize = EUCTW_TABLE_SIZE
self._mTypicalDistributionRatio = EUCTW_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-TW encoding, we are interested
# first byte range: 0xc4 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xC4:
return 94 * (first_char - 0xC4) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class EUCKRDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = EUCKRCharToFreqOrder
self._mTableSize = EUCKR_TABLE_SIZE
self._mTypicalDistributionRatio = EUCKR_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-KR encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char = wrap_ord(aBuf[0])
if first_char >= 0xB0:
return 94 * (first_char - 0xB0) + wrap_ord(aBuf[1]) - 0xA1
else:
return -1
class GB2312DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = GB2312CharToFreqOrder
self._mTableSize = GB2312_TABLE_SIZE
self._mTypicalDistributionRatio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
else:
return -1
class Big5DistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = Big5CharToFreqOrder
self._mTableSize = BIG5_TABLE_SIZE
self._mTypicalDistributionRatio = BIG5_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for big5 encoding, we are interested
# first byte range: 0xa4 -- 0xfe
# second byte range: 0x40 -- 0x7e , 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if first_char >= 0xA4:
if second_char >= 0xA1:
return 157 * (first_char - 0xA4) + second_char - 0xA1 + 63
else:
return 157 * (first_char - 0xA4) + second_char - 0x40
else:
return -1
class SJISDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for sjis encoding, we are interested
# first byte range: 0x81 -- 0x9f , 0xe0 -- 0xfe
# second byte range: 0x40 -- 0x7e, 0x81 -- oxfe
# no validation needed here. State machine has done that
first_char, second_char = wrap_ord(aBuf[0]), wrap_ord(aBuf[1])
if (first_char >= 0x81) and (first_char <= 0x9F):
order = 188 * (first_char - 0x81)
elif (first_char >= 0xE0) and (first_char <= 0xEF):
order = 188 * (first_char - 0xE0 + 31)
else:
return -1
order = order + second_char - 0x40
if second_char > 0x7F:
order = -1
return order
class EUCJPDistributionAnalysis(CharDistributionAnalysis):
def __init__(self):
CharDistributionAnalysis.__init__(self)
self._mCharToFreqOrder = JISCharToFreqOrder
self._mTableSize = JIS_TABLE_SIZE
self._mTypicalDistributionRatio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, aBuf):
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = wrap_ord(aBuf[0])
if char >= 0xA0:
return 94 * (char - 0xA1) + wrap_ord(aBuf[1]) - 0xa1
else:
return -1
| mit |
gnulinooks/sympy | sympy/polys/wrappers.py | 1 | 1754 |
from polynomial import Poly
def LexPoly(*args):
    """Returns a polynomial with lexicographic order of terms. """
    return Poly(*args, order='lex')
from algorithms import poly_div, poly_pdiv, poly_groebner, poly_lcm, poly_gcd, \
poly_half_gcdex, poly_gcdex, poly_sqf, poly_resultant, poly_subresultants, \
poly_decompose, poly_quo, poly_rem, poly_pquo, poly_prem
from rootfinding import poly_root_factors, poly_sturm
def _conv_args(n, args):
    """Normalize trailing symbol arguments.

    The first *n* entries of *args* are payload; when exactly one extra
    argument follows and it is a tuple or list of symbols, it is unpacked
    in place so callers may pass symbols either individually or grouped.
    """
    extra = args[n:]
    if len(extra) == 1 and isinstance(extra[0], (tuple, list)):
        return args[:n] + tuple(extra[0])
    return args
def _map_basic(f, n, *args, **kwargs):
    """Call *f* with normalized args and convert Poly results to Basic.

    Collection results are rebuilt as the same collection type with each
    element converted via as_basic(); scalar results are converted directly.
    """
    result = f(*_conv_args(n, args), **kwargs)
    if isinstance(result, (list, tuple, set)):
        return result.__class__(g.as_basic() for g in result)
    return result.as_basic()
# Map from wrapper name to the number of leading polynomial arguments the
# corresponding poly_<name> function takes (remaining args are symbols).
_funcs = {
    'quo' : 2,
    'rem' : 2,
    'pdiv' : 2,
    'pquo' : 2,
    'prem' : 2,
    'groebner' : 1,
    'lcm' : 2,
    'gcd' : 2,
    'gcdex' : 2,
    'half_gcdex' : 2,
    'subresultants' : 2,
    'resultant' : 2,
    'sqf' : 1,
    'decompose' : 1,
    'root_factors' : 1,
    'sturm' : 1,
}

# Source template used to generate one thin wrapper per entry above; each
# wrapper delegates to _map_basic with the matching poly_<name> function and
# inherits that function's docstring.
_func_def = \
"""
def %s(*args, **kwargs):
    return _map_basic(poly_%s, %d, *args, **kwargs)

%s.__doc__ = poly_%s.__doc__
"""

# NOTE(review): Python 2 only -- both `dict.iteritems` and the statement
# form of `exec` are gone in Python 3.
for _func, _n in _funcs.iteritems():
    exec _func_def % (_func, _func, _n, _func, _func)
def div(*args, **kwargs):
    q, r = poly_div(*_conv_args(2, args), **kwargs)

    # poly_div may return either a single quotient or a list of quotients
    # (exact `type is list` check preserved from the original).
    if type(q) is list:
        q = [ p.as_basic() for p in q ]
    else:
        q = q.as_basic()

    return q, r.as_basic()

# Reuse poly_div's documentation for the wrapper.
div.__doc__ = poly_div.__doc__
| bsd-3-clause |
80vs90/django-allauth | allauth/socialaccount/providers/soundcloud/views.py | 80 | 1040 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import SoundCloudProvider
class SoundCloudOAuth2Adapter(OAuth2Adapter):
    """OAuth2 endpoint configuration and profile retrieval for SoundCloud."""
    provider_id = SoundCloudProvider.id
    access_token_url = 'https://api.soundcloud.com/oauth2/token'
    authorize_url = 'https://soundcloud.com/connect'
    profile_url = 'https://api.soundcloud.com/me.json'

    def complete_login(self, request, app, token, **kwargs):
        # Fetch the user's profile, passing the access token as the
        # ``oauth_token`` query parameter.
        response = requests.get(self.profile_url,
                                params={'oauth_token': token.token})
        extra_data = response.json()
        provider = self.get_provider()
        return provider.sociallogin_from_response(request, extra_data)
# Build the login/callback view callables from the adapter class.
oauth2_login = OAuth2LoginView.adapter_view(SoundCloudOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(SoundCloudOAuth2Adapter)
| mit |
belmiromoreira/nova | nova/tests/unit/api/openstack/compute/contrib/test_extended_hypervisors.py | 32 | 4485 | # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from nova.api.openstack.compute.contrib import hypervisors as hypervisors_v2
from nova.api.openstack.compute.plugins.v3 import hypervisors \
as hypervisors_v21
from nova.api.openstack import extensions
from nova import exception
from nova import objects
from nova import test
from nova.tests.unit.api.openstack.compute.contrib import test_hypervisors
from nova.tests.unit.api.openstack import fakes
def fake_compute_node_get(context, compute_id):
    """Stub for host_api.compute_node_get backed by the canned hypervisors."""
    wanted_id = int(compute_id)
    for hyper_obj in test_hypervisors.TEST_HYPERS_OBJ:
        if hyper_obj.id == wanted_id:
            return hyper_obj
    # Mirror the real API: unknown ids raise ComputeHostNotFound.
    raise exception.ComputeHostNotFound(host=compute_id)
def fake_compute_node_get_all(context):
    # Stub for host_api.compute_node_get_all: returns the canned hypervisor
    # objects directly (same list object, not a copy).
    return test_hypervisors.TEST_HYPERS_OBJ
@classmethod
def fake_service_get_by_compute_host(cls, context, host):
    """Stub for objects.Service.get_by_compute_host using canned services.

    Like the original, yields None (implicit fallthrough) when nothing matches.
    """
    matches = (svc for svc in test_hypervisors.TEST_SERVICES
               if svc.host == host)
    return next(matches, None)
class ExtendedHypervisorsTestV21(test.NoDBTestCase):
    """Tests for the v2.1 hypervisors API including the extended
    state/status/service attributes in detail and show responses.
    """
    # Expected response bodies: start from the canned hypervisor dicts and
    # drop the DB-only fields that the API view never exposes.
    DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    del DETAIL_HYPERS_DICTS[0]['host']
    del DETAIL_HYPERS_DICTS[1]['host']
    # v2.1 adds state/status plus an embedded service dict.
    DETAIL_HYPERS_DICTS[0].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=1, host='compute1',
                                                   disabled_reason=None)})
    DETAIL_HYPERS_DICTS[1].update({'state': 'up',
                                   'status': 'enabled',
                                   'service': dict(id=2, host='compute2',
                                                   disabled_reason=None)})
    def _set_up_controller(self):
        # Overridden in the V2 subclass below; servicegroup is mocked so
        # every hypervisor reports as 'up'.
        self.controller = hypervisors_v21.HypervisorsController()
        self.controller.servicegroup_api.service_is_up = mock.MagicMock(
            return_value=True)
    def _get_request(self):
        return fakes.HTTPRequest.blank('/v2/fake/os-hypervisors/detail',
                                       use_admin_context=True)
    def setUp(self):
        super(ExtendedHypervisorsTestV21, self).setUp()
        self._set_up_controller()
        # Route all host/service lookups to the canned fixtures above.
        self.stubs.Set(self.controller.host_api, 'compute_node_get_all',
                       fake_compute_node_get_all)
        self.stubs.Set(self.controller.host_api, 'compute_node_get',
                       fake_compute_node_get)
        self.stubs.Set(objects.Service, 'get_by_compute_host',
                       fake_service_get_by_compute_host)
    def test_view_hypervisor_detail_noservers(self):
        result = self.controller._view_hypervisor(
            test_hypervisors.TEST_HYPERS_OBJ[0],
            test_hypervisors.TEST_SERVICES[0], True)
        self.assertEqual(result, self.DETAIL_HYPERS_DICTS[0])
    def test_detail(self):
        req = self._get_request()
        result = self.controller.detail(req)
        self.assertEqual(result, dict(hypervisors=self.DETAIL_HYPERS_DICTS))
    def test_show_withid(self):
        req = self._get_request()
        result = self.controller.show(req, '1')
        self.assertEqual(result, dict(hypervisor=self.DETAIL_HYPERS_DICTS[0]))
class ExtendedHypervisorsTestV2(ExtendedHypervisorsTestV21):
    """Same tests against the legacy v2 extension; v2 responses embed only
    the service id/host (no state/status/disabled_reason).
    """
    DETAIL_HYPERS_DICTS = copy.deepcopy(test_hypervisors.TEST_HYPERS)
    del DETAIL_HYPERS_DICTS[0]['service_id']
    del DETAIL_HYPERS_DICTS[1]['service_id']
    del DETAIL_HYPERS_DICTS[0]['host']
    del DETAIL_HYPERS_DICTS[1]['host']
    DETAIL_HYPERS_DICTS[0].update({'service': dict(id=1, host='compute1')})
    DETAIL_HYPERS_DICTS[1].update({'service': dict(id=2, host='compute2')})
    def _set_up_controller(self):
        # v2 controllers take an extension manager; enable the extension
        # under test explicitly.
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.ext_mgr.extensions['os-extended-hypervisors'] = True
        self.controller = hypervisors_v2.HypervisorsController(self.ext_mgr)
| apache-2.0 |
USGSDenverPychron/pychron | pychron/canvas/canvas2D/scene/strat_scene.py | 1 | 2499 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
# ============= standard library imports ========================
# ============= local library imports ==========================
import yaml
from pychron.canvas.canvas2D.scene.primitives.strat import StratItem
from pychron.canvas.canvas2D.scene.scene import Scene
class StratScene(Scene):
    """Scene that renders stratigraphic age/elevation data as StratItem glyphs."""
    def load(self, p):
        # p: path to a YAML file, or an already-parsed mapping with an
        # 'items' list -- see _get_dict below.
        self.reset_layers()
        heights = []
        ages = []
        lowages = []
        highages = []
        # items=[]
        yd = self._get_dict(p)
        for yi in yd['items']:
            elev = yi['elevation']
            # Offset repeated elevations so coincident points do not
            # overplot each other (c counts prior items at this elevation).
            c = 0
            if elev in heights:
                c = heights.count(elev)
            age = yi['age']
            heights.append(elev)
            ages.append(age)
            err = yi['age_err']
            # Track the 2-sigma age envelope for the axis limits below.
            lowages.append(age - err * 2)
            highages.append(age + err * 2)
            v = StratItem(age,
                          elev,
                          error=err,
                          default_color=yi.get('color', 'black'),
                          soffset_x=c,
                          soffset_y=c * 12,
                          label_offsety=-15,
                          font='modern 10',
                          vjustify='center',
                          text=yi['label'],
                          use_border=False)
            self.add_item(v)
        # NOTE(review): min()/max() raise ValueError if 'items' is empty --
        # callers appear to always supply at least one item; confirm.
        self.value_limits = (min(heights), max(heights))
        self.index_limits = (min(lowages), max(highages))
    def _get_dict(self, p):
        # Accept either a path (str/unicode -- this is Python 2 code) or a
        # pre-parsed dict, which is returned unchanged.
        if isinstance(p, (str, unicode)):
            with open(p, 'r') as rfile:
                # SECURITY NOTE(review): yaml.load without an explicit Loader
                # can construct arbitrary Python objects; prefer
                # yaml.safe_load if the file can come from an untrusted source.
                p = yaml.load(rfile)
        return p
# ============= EOF =============================================
| apache-2.0 |
perimosocordiae/scipy | scipy/io/_fortran.py | 7 | 10867 | """
Module to read / write Fortran unformatted sequential files.
This is in the spirit of code written by Neil Martinsen-Burrell and Joe Zuntz.
"""
import warnings
import numpy as np
__all__ = ['FortranFile', 'FortranEOFError', 'FortranFormattingError']
class FortranEOFError(TypeError, IOError):
    """Raised when the file ends cleanly at a record boundary.

    Descends from TypeError for backwards compatibility: earlier versions
    raised TypeError at end of file (that was the only way to detect it),
    so existing ``except TypeError:`` handlers keep working.
    """
class FortranFormattingError(TypeError, IOError):
    """Raised when the file ends part-way through a record.

    Descends from TypeError for backward compatibility with callers that
    handled the old TypeError behaviour.
    """
class FortranFile:
    """
    A file object for unformatted sequential files from Fortran code.
    Parameters
    ----------
    filename : file or str
        Open file object or filename.
    mode : {'r', 'w'}, optional
        Read-write mode, default is 'r'.
    header_dtype : dtype, optional
        Data type of the header. Size and endianness must match the input/output file.
    Notes
    -----
    These files are broken up into records of unspecified types. The size of
    each record is given at the start (although the size of this header is not
    standard) and the data is written onto disk without any formatting. Fortran
    compilers supporting the BACKSPACE statement will write a second copy of
    the size to facilitate backwards seeking.
    This class only supports files written with both sizes for the record.
    It also does not support the subrecords used in Intel and gfortran compilers
    for records which are greater than 2GB with a 4-byte header.
    An example of an unformatted sequential file in Fortran would be written as::
        OPEN(1, FILE=myfilename, FORM='unformatted')
        WRITE(1) myvariable
    Since this is a non-standard file format, whose contents depend on the
    compiler and the endianness of the machine, caution is advised. Files from
    gfortran 4.8.0 and gfortran 4.1.2 on x86_64 are known to work.
    Consider using Fortran direct-access files or files from the newer Stream
    I/O, which can be easily read by `numpy.fromfile`.
    Examples
    --------
    To create an unformatted sequential Fortran file:
    >>> from scipy.io import FortranFile
    >>> f = FortranFile('test.unf', 'w')
    >>> f.write_record(np.array([1,2,3,4,5], dtype=np.int32))
    >>> f.write_record(np.linspace(0,1,20).reshape((5,4)).T)
    >>> f.close()
    To read this file:
    >>> f = FortranFile('test.unf', 'r')
    >>> print(f.read_ints(np.int32))
    [1 2 3 4 5]
    >>> print(f.read_reals(float).reshape((5,4), order="F"))
    [[0. 0.05263158 0.10526316 0.15789474]
    [0.21052632 0.26315789 0.31578947 0.36842105]
    [0.42105263 0.47368421 0.52631579 0.57894737]
    [0.63157895 0.68421053 0.73684211 0.78947368]
    [0.84210526 0.89473684 0.94736842 1. ]]
    >>> f.close()
    Or, in Fortran::
        integer :: a(5), i
        double precision :: b(5,4)
        open(1, file='test.unf', form='unformatted')
        read(1) a
        read(1) b
        close(1)
        write(*,*) a
        do i = 1, 5
            write(*,*) b(i,:)
        end do
    """
    def __init__(self, filename, mode='r', header_dtype=np.uint32):
        if header_dtype is None:
            raise ValueError('Must specify dtype')
        header_dtype = np.dtype(header_dtype)
        if header_dtype.kind != 'u':
            # Signed size markers are legal in some files but suspicious.
            warnings.warn("Given a dtype which is not unsigned.")
        # 'in' on a string is a substring test, so also require length 1
        # (otherwise mode='rw' would slip through).
        if mode not in 'rw' or len(mode) != 1:
            raise ValueError('mode must be either r or w')
        if hasattr(filename, 'seek'):
            # Already an open file-like object; use it as-is.
            self._fp = filename
        else:
            self._fp = open(filename, '%sb' % mode)
        self._header_dtype = header_dtype
    def _read_size(self, eof_ok=False):
        # Read one record-size marker. A clean EOF (no bytes at all) is only
        # acceptable when eof_ok is set, i.e. at a record boundary.
        n = self._header_dtype.itemsize
        b = self._fp.read(n)
        if (not b) and eof_ok:
            raise FortranEOFError("End of file occurred at end of record")
        elif len(b) < n:
            # A partial marker means the file was truncated mid-record.
            raise FortranFormattingError(
                "End of file in the middle of the record size")
        return int(np.frombuffer(b, dtype=self._header_dtype, count=1))
    def write_record(self, *items):
        """
        Write a record (including sizes) to the file.
        Parameters
        ----------
        *items : array_like
            The data arrays to write.
        Notes
        -----
        Writes data items to a file::
            write_record(a.T, b.T, c.T, ...)
            write(1) a, b, c, ...
        Note that data in multidimensional arrays is written in
        row-major order --- to make them read correctly by Fortran
        programs, you need to transpose the arrays yourself when
        writing them.
        """
        items = tuple(np.asarray(item) for item in items)
        total_size = sum(item.nbytes for item in items)
        # The payload is bracketed by identical leading/trailing size markers.
        nb = np.array([total_size], dtype=self._header_dtype)
        nb.tofile(self._fp)
        for item in items:
            item.tofile(self._fp)
        nb.tofile(self._fp)
    def read_record(self, *dtypes, **kwargs):
        """
        Reads a record of a given type from the file.
        Parameters
        ----------
        *dtypes : dtypes, optional
            Data type(s) specifying the size and endianness of the data.
        Returns
        -------
        data : ndarray
            A 1-D array object.
        Raises
        ------
        FortranEOFError
            To signal that no further records are available
        FortranFormattingError
            To signal that the end of the file was encountered
            part-way through a record
        Notes
        -----
        If the record contains a multidimensional array, you can specify
        the size in the dtype. For example::
            INTEGER var(5,4)
        can be read with::
            read_record('(4,5)i4').T
        Note that this function does **not** assume the file data is in Fortran
        column major order, so you need to (i) swap the order of dimensions
        when reading and (ii) transpose the resulting array.
        Alternatively, you can read the data as a 1-D array and handle the
        ordering yourself. For example::
            read_record('i4').reshape(5, 4, order='F')
        For records that contain several variables or mixed types (as opposed
        to single scalar or array types), give them as separate arguments::
            double precision :: a
            integer :: b
            write(1) a, b
            record = f.read_record('<f4', '<i4')
            a = record[0] # first number
            b = record[1] # second number
        and if any of the variables are arrays, the shape can be specified as
        the third item in the relevant dtype::
            double precision :: a
            integer :: b(3,4)
            write(1) a, b
            record = f.read_record('<f4', np.dtype(('<i4', (4, 3))))
            a = record[0]
            b = record[1].T
        NumPy also supports a short syntax for this kind of type::
            record = f.read_record('<f4', '(3,3)<i4')
        See Also
        --------
        read_reals
        read_ints
        """
        dtype = kwargs.pop('dtype', None)
        if kwargs:
            raise ValueError("Unknown keyword arguments {}".format(tuple(kwargs.keys())))
        if dtype is not None:
            dtypes = dtypes + (dtype,)
        elif not dtypes:
            raise ValueError('Must specify at least one dtype')
        # eof_ok: a clean EOF before any bytes of this record is not an error.
        first_size = self._read_size(eof_ok=True)
        dtypes = tuple(np.dtype(dtype) for dtype in dtypes)
        block_size = sum(dtype.itemsize for dtype in dtypes)
        # The record must hold a whole number of (possibly repeated) blocks.
        num_blocks, remainder = divmod(first_size, block_size)
        if remainder != 0:
            raise ValueError('Size obtained ({0}) is not a multiple of the '
                             'dtypes given ({1}).'.format(first_size, block_size))
        if len(dtypes) != 1 and first_size != block_size:
            # Fortran does not write mixed type array items in interleaved order,
            # and it's not possible to guess the sizes of the arrays that were written.
            # The user must specify the exact sizes of each of the arrays.
            raise ValueError('Size obtained ({0}) does not match with the expected '
                             'size ({1}) of multi-item record'.format(first_size, block_size))
        data = []
        for dtype in dtypes:
            r = np.fromfile(self._fp, dtype=dtype, count=num_blocks)
            if len(r) != num_blocks:
                raise FortranFormattingError(
                    "End of file in the middle of a record")
            if dtype.shape != ():
                # Squeeze outmost block dimension for array items
                if num_blocks == 1:
                    assert r.shape == (1,) + dtype.shape
                    r = r[0]
            data.append(r)
        # The trailing marker must match the leading one, or the header
        # dtype/size was wrong.
        second_size = self._read_size()
        if first_size != second_size:
            raise IOError('Sizes do not agree in the header and footer for '
                          'this record - check header dtype')
        # Unpack result
        if len(dtypes) == 1:
            return data[0]
        else:
            return tuple(data)
    def read_ints(self, dtype='i4'):
        """
        Reads a record of a given type from the file, defaulting to an integer
        type (``INTEGER*4`` in Fortran).
        Parameters
        ----------
        dtype : dtype, optional
            Data type specifying the size and endianness of the data.
        Returns
        -------
        data : ndarray
            A 1-D array object.
        See Also
        --------
        read_reals
        read_record
        """
        return self.read_record(dtype)
    def read_reals(self, dtype='f8'):
        """
        Reads a record of a given type from the file, defaulting to a floating
        point number (``real*8`` in Fortran).
        Parameters
        ----------
        dtype : dtype, optional
            Data type specifying the size and endianness of the data.
        Returns
        -------
        data : ndarray
            A 1-D array object.
        See Also
        --------
        read_ints
        read_record
        """
        return self.read_record(dtype)
    def close(self):
        """
        Closes the file. It is unsupported to call any other methods off this
        object after closing it. Note that this class supports the 'with'
        statement in modern versions of Python, to call this automatically
        """
        self._fp.close()
    def __enter__(self):
        return self
    def __exit__(self, type, value, tb):
        # Always close, regardless of whether an exception is propagating.
        self.close()
| bsd-3-clause |
rohitwaghchaure/frappe-digitales | frappe/templates/pages/print.py | 27 | 6986 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe, os, copy, json, re
from frappe import _
from frappe.modules import get_doc_path
from jinja2 import TemplateNotFound
from frappe.utils import cint, strip_html
from frappe.utils.pdf import get_pdf
# Page flags consumed by the website renderer.
no_cache = 1
no_sitemap = 1
# Jinja templates used for the page shell and the default print format.
base_template_path = "templates/pages/print.html"
standard_format = "templates/print_formats/standard.html"
def get_context(context):
	"""Assemble the template context for the /print page.
	Reads doctype, name, format, style, trigger_print and no_letterhead
	from frappe.form_dict.
	"""
	if not frappe.form_dict.format:
		frappe.form_dict.format = standard_format
	if not frappe.form_dict.doctype or not frappe.form_dict.name:
		# Missing parameters: return an inline error page instead of raising.
		return {
			"body": """<h1>Error</h1>
				<p>Parameters doctype, name and format required</p>
				<pre>%s</pre>""" % repr(frappe.form_dict)
		}
	doc = frappe.get_doc(frappe.form_dict.doctype, frappe.form_dict.name)
	meta = frappe.get_meta(doc.doctype)
	return {
		"body": get_html(doc, print_format = frappe.form_dict.format,
			meta=meta, trigger_print = frappe.form_dict.trigger_print, no_letterhead=frappe.form_dict.no_letterhead),
		"css": get_print_style(frappe.form_dict.style),
		"comment": frappe.session.user,
		"title": doc.get(meta.title_field) if meta.title_field else doc.name
	}
@frappe.whitelist()
def get_html(doc, name=None, print_format=None, meta=None,
	no_letterhead=None, trigger_print=False):
	"""Render *doc* with the given print format and return the HTML body.
	doc may be a document object, a doctype name (paired with *name*), or a
	JSON string of the document. Runs the doc's before_print hook if present.
	"""
	if isinstance(no_letterhead, basestring):
		no_letterhead = cint(no_letterhead)
	elif no_letterhead is None:
		# Fall back to the global Print Settings default.
		no_letterhead = not cint(frappe.db.get_single_value("Print Settings", "with_letterhead"))
	if isinstance(doc, basestring) and isinstance(name, basestring):
		doc = frappe.get_doc(doc, name)
	if isinstance(doc, basestring):
		doc = frappe.get_doc(json.loads(doc))
	doc.in_print = True
	validate_print_permission(doc)
	if hasattr(doc, "before_print"):
		doc.before_print()
	if not hasattr(doc, "print_heading"): doc.print_heading = None
	if not hasattr(doc, "sub_heading"): doc.sub_heading = None
	if not meta:
		meta = frappe.get_meta(doc.doctype)
	jenv = frappe.get_jenv()
	if print_format in ("Standard", standard_format):
		template = jenv.get_template("templates/print_formats/standard.html")
	else:
		# Custom print format: the template HTML comes from disk or the DB.
		template = jenv.from_string(get_print_format(doc.doctype,
			print_format))
	args = {
		"doc": doc,
		"meta": frappe.get_meta(doc.doctype),
		"layout": make_layout(doc, meta),
		"no_letterhead": no_letterhead,
		"trigger_print": cint(trigger_print),
		"letter_head": get_letter_head(doc, no_letterhead)
	}
	html = template.render(args, filters={"len": len})
	return html
@frappe.whitelist()
def download_pdf(doctype, name, format=None):
	"""Send the document rendered with *format* as a PDF file download."""
	rendered_html = frappe.get_print_format(doctype, name, format)
	# Sanitize the document name so it is safe as a filename.
	safe_name = name.replace(" ", "-").replace("/", "-")
	frappe.local.response.filename = "{0}.pdf".format(safe_name)
	frappe.local.response.filecontent = get_pdf(rendered_html)
	frappe.local.response.type = "download"
def validate_print_permission(doc):
	"""Raise PermissionError unless the session user may read and print *doc*."""
	for needed in ("read", "print"):
		allowed = frappe.has_permission(doc.doctype, needed, doc)
		if not allowed:
			# Stop at the first missing permission, like the original.
			raise frappe.PermissionError(_("No {0} permission").format(needed))
def get_letter_head(doc, no_letterhead):
	"""Return the letter head HTML for *doc*, or "" when suppressed."""
	if no_letterhead:
		return ""
	chosen = doc.get("letter_head")
	if chosen:
		return frappe.db.get_value("Letter Head", chosen, "content")
	# No letter head on the doc: fall back to the system default (if any).
	return frappe.db.get_value("Letter Head", {"is_default": 1}, "content") or ""
def get_print_format(doctype, format_name):
	"""Return the Jinja template HTML for *format_name*.
	Raises DoesNotExistError for unknown/disabled formats and
	TemplateNotFoundError when neither a file nor stored html exists.
	"""
	if format_name==standard_format:
		return format_name
	opts = frappe.db.get_value("Print Format", format_name, "disabled", as_dict=True)
	if not opts:
		frappe.throw(_("Print Format {0} does not exist").format(format_name), frappe.DoesNotExistError)
	elif opts.disabled:
		frappe.throw(_("Print Format {0} is disabled").format(format_name), frappe.DoesNotExistError)
	# server, find template
	path = os.path.join(get_doc_path(frappe.db.get_value("DocType", doctype, "module"),
		"Print Format", format_name), frappe.scrub(format_name) + ".html")
	if os.path.exists(path):
		with open(path, "r") as pffile:
			return pffile.read()
	else:
		# No file on disk: fall back to html stored on the Print Format doc.
		html = frappe.db.get_value("Print Format", format_name, "html")
		if html:
			return html
		else:
			frappe.throw(_("No template found at path: {0}").format(path),
				frappe.TemplateNotFoundError)
def make_layout(doc, meta):
	"""Build the print layout as nested lists: pages -> sections -> columns
	-> docfields. A table row with page_break starts a new page; the table
	docfield is shallow-copied and df.start/df.end mark the row slice shown
	on each page.
	"""
	layout, page = [], []
	layout.append(page)
	for df in meta.fields:
		if df.fieldtype=="Section Break" or page==[]:
			page.append([])
		if df.fieldtype=="Column Break" or (page[-1]==[] and df.fieldtype!="Section Break"):
			page[-1].append([])
		if df.fieldtype=="HTML" and df.options:
			doc.set(df.fieldname, True) # show this field
		if is_visible(df) and has_value(df, doc):
			page[-1][-1].append(df)
		# if table, add the row info in the field
		# if a page break is found, create a new docfield
		if df.fieldtype=="Table":
			df.rows = []
			df.start = 0
			df.end = None
			for i, row in enumerate(doc.get(df.fieldname)):
				if row.get("page_break"):
					# close the earlier row
					df.end = i
					# new page, with empty section and column
					page = [[[]]]
					layout.append(page)
					# continue the table in a new page
					df = copy.copy(df)
					df.start = i
					df.end = None
					page[-1][-1].append(df)
	# filter empty sections
	layout = [filter(lambda s: any(filter(lambda c: any(c), s)), page) for page in layout]
	return layout
def is_visible(df):
	"""Whether docfield *df* should be rendered on the print format."""
	layout_only = ("Section Break", "Column Break", "Button")
	if df.fieldtype in layout_only:
		return False
	# Hidden either by the runtime __print_hide flag or the meta print_hide.
	return not (df.get("__print_hide") or df.print_hide)
def has_value(df, doc):
	"""Whether *doc* holds a printable (non-empty) value for docfield *df*."""
	value = doc.get(df.fieldname)
	if value is None or value == "":
		return False
	if isinstance(value, basestring) and not strip_html(value).strip():
		# Strings that are only markup/whitespace count as empty.
		return False
	return True
def get_print_style(style=None):
	"""Return the compiled CSS for the given print style (default taken from
	Print Settings), merging standard.css with the style-specific stylesheet
	and hoisting any @import rules to the top.
	"""
	print_settings = frappe.get_doc("Print Settings")
	if not style:
		style = print_settings.print_style or "Standard"
	context = {"print_settings": print_settings, "print_style": style}
	css = frappe.get_template("templates/styles/standard.css").render(context)
	try:
		additional_css = frappe.get_template("templates/styles/" + style.lower() + ".css").render(context)
		# move @import to top
		for at_import in list(set(re.findall("(@import url\([^\)]+\)[;]?)", additional_css))):
			additional_css = additional_css.replace(at_import, "")
			# prepend css with at_import
			css = at_import + css
		css += "\n" + additional_css
	except TemplateNotFound:
		# No style-specific stylesheet: the standard CSS alone is fine.
		pass
	return css
def get_visible_columns(data, table_meta):
	"""Docfields of a child table that are printable and non-empty in *data*."""
	return [tdf for tdf in table_meta.fields
		if is_visible(tdf) and column_has_value(data, tdf.fieldname)]
def column_has_value(data, fieldname):
	"""Check if at least one cell in column has non-zero and non-blank value.

	*data* is a list of row dicts/documents. Falsy cells (None, "", 0) never
	count; string cells count only when they contain visible text after
	stripping HTML.
	"""
	for row in data:
		value = row.get(fieldname)
		if not value:
			continue
		if isinstance(value, basestring):
			# A string counts only if it has visible text once HTML is stripped.
			if strip_html(value).strip():
				return True
		else:
			# Any other truthy value (numbers, dates, ...) counts immediately.
			return True
	return False
| mit |
mpare002/HackTech_2017 | env/Lib/encodings/cp037.py | 593 | 13377 | """ Python Character Mapping Codec cp037 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP037.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless codec: both directions are single-pass charmap table lookups
    # against the module-level encoding_table / decoding_table.
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # charmap encoding needs no state, so incremental == one-shot.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    pass
class StreamReader(Codec,codecs.StreamReader):
    pass
### encodings module API
def getregentry():
    # Registry hook used by the encodings package to obtain this codec.
    return codecs.CodecInfo(
        name='cp037',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x48 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xa2' # 0x4A -> CENT SIGN
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'|' # 0x4F -> VERTICAL LINE
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'!' # 0x5A -> EXCLAMATION MARK
u'$' # 0x5B -> DOLLAR SIGN
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'\xac' # 0x5F -> NOT SIGN
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x68 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xa6' # 0x6A -> BROKEN BAR
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'`' # 0x79 -> GRAVE ACCENT
u':' # 0x7A -> COLON
u'#' # 0x7B -> NUMBER SIGN
u'@' # 0x7C -> COMMERCIAL AT
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'"' # 0x7F -> QUOTATION MARK
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xf0' # 0x8C -> LATIN SMALL LETTER ETH (ICELANDIC)
u'\xfd' # 0x8D -> LATIN SMALL LETTER Y WITH ACUTE
u'\xfe' # 0x8E -> LATIN SMALL LETTER THORN (ICELANDIC)
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'~' # 0xA1 -> TILDE
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u'\xd0' # 0xAC -> LATIN CAPITAL LETTER ETH (ICELANDIC)
u'\xdd' # 0xAD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xde' # 0xAE -> LATIN CAPITAL LETTER THORN (ICELANDIC)
u'\xae' # 0xAF -> REGISTERED SIGN
u'^' # 0xB0 -> CIRCUMFLEX ACCENT
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'[' # 0xBA -> LEFT SQUARE BRACKET
u']' # 0xBB -> RIGHT SQUARE BRACKET
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'{' # 0xC0 -> LEFT CURLY BRACKET
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0xCC -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'}' # 0xD0 -> RIGHT CURLY BRACKET
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xDC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\\' # 0xE0 -> REVERSE SOLIDUS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd6' # 0xEC -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xFC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| mit |
amenonsen/ansible | lib/ansible/modules/cloud/cloudstack/cs_sshkeypair.py | 17 | 8435 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_sshkeypair
short_description: Manages SSH keys on Apache CloudStack based clouds.
description:
- Create, register and remove SSH keys.
  - If no key was found and no public key was provided, a new SSH
    private/public key pair will be created and the private key will be returned.
version_added: '2.0'
author: René Moser (@resmo)
options:
name:
description:
- Name of public key.
type: str
required: true
domain:
description:
- Domain the public key is related to.
type: str
account:
description:
- Account the public key is related to.
type: str
project:
description:
- Name of the project the public key to be registered in.
type: str
state:
description:
- State of the public key.
type: str
default: present
choices: [ present, absent ]
public_key:
description:
- String of the public key.
type: str
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: create a new private / public key pair
cs_sshkeypair:
name: linus@example.com
delegate_to: localhost
register: key
- debug:
msg: 'Private key is {{ key.private_key }}'
- name: remove a public key by its name
cs_sshkeypair:
name: linus@example.com
state: absent
delegate_to: localhost
- name: register your existing local public key
cs_sshkeypair:
name: linus@example.com
public_key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}"
delegate_to: localhost
'''
RETURN = '''
---
id:
description: UUID of the SSH public key.
returned: success
type: str
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the SSH public key.
returned: success
type: str
sample: linus@example.com
fingerprint:
description: Fingerprint of the SSH public key.
returned: success
type: str
sample: "86:5e:a3:e8:bd:95:7b:07:7c:c2:5c:f7:ad:8b:09:28"
private_key:
description: Private key of generated SSH keypair.
returned: changed
type: str
sample: "-----BEGIN RSA PRIVATE KEY-----\nMII...8tO\n-----END RSA PRIVATE KEY-----\n"
'''
import traceback
SSHPUBKEYS_IMP_ERR = None
try:
import sshpubkeys
HAS_LIB_SSHPUBKEYS = True
except ImportError:
SSHPUBKEYS_IMP_ERR = traceback.format_exc()
HAS_LIB_SSHPUBKEYS = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_required_together,
cs_argument_spec
)
class AnsibleCloudStackSshKey(AnsibleCloudStack):
    """CloudStack SSH key pair handler: create, register and remove keys."""

    def __init__(self, module):
        super(AnsibleCloudStackSshKey, self).__init__(module)
        # Maps CloudStack API result keys onto the keys used in the
        # module's result dict (consumed by get_result()).
        self.returns = {
            'privatekey': 'private_key',
            'fingerprint': 'fingerprint',
        }
        # Cache for the looked-up key dict; populated lazily by get_ssh_key().
        self.ssh_key = None

    def register_ssh_key(self, public_key):
        """Register the given public key under the configured name.

        When an existing key disagrees on fingerprint or name, the stale
        key(s) are deleted first so name and fingerprint end up consistent,
        then the key is (re-)registered.  Returns the resulting key dict.
        """
        ssh_key = self.get_ssh_key()
        args = self._get_common_args()
        name = self.module.params.get('name')

        res = None
        if not ssh_key:
            # No key known yet under either fingerprint or name: register it.
            self.result['changed'] = True
            args['publickey'] = public_key
            if not self.module.check_mode:
                args['name'] = name
                res = self.query_api('registerSSHKeyPair', **args)
        else:
            fingerprint = self._get_ssh_fingerprint(public_key)
            if ssh_key['fingerprint'] != fingerprint:
                self.result['changed'] = True
                if not self.module.check_mode:
                    # delete the ssh key with matching name but wrong fingerprint
                    args['name'] = name
                    self.query_api('deleteSSHKeyPair', **args)

            elif ssh_key['name'].lower() != name.lower():
                self.result['changed'] = True
                if not self.module.check_mode:
                    # delete the ssh key with matching fingerprint but wrong name
                    args['name'] = ssh_key['name']
                    self.query_api('deleteSSHKeyPair', **args)

                    # First match for key retrieval will be the fingerprint.
                    # We need to make another lookup if there is a key with identical name.
                    self.ssh_key = None
                    ssh_key = self.get_ssh_key()
                    if ssh_key and ssh_key['fingerprint'] != fingerprint:
                        args['name'] = name
                        self.query_api('deleteSSHKeyPair', **args)

            # Re-register after any deletion above.
            if not self.module.check_mode and self.result['changed']:
                args['publickey'] = public_key
                args['name'] = name
                res = self.query_api('registerSSHKeyPair', **args)

        if res and 'keypair' in res:
            ssh_key = res['keypair']
        return ssh_key

    def create_ssh_key(self):
        """Create a brand-new key pair on the cloud if none exists yet.

        The API response for createSSHKeyPair includes the private key,
        which ends up in the module result via self.returns.
        """
        ssh_key = self.get_ssh_key()
        if not ssh_key:
            self.result['changed'] = True
            args = self._get_common_args()
            args['name'] = self.module.params.get('name')
            if not self.module.check_mode:
                res = self.query_api('createSSHKeyPair', **args)
                ssh_key = res['keypair']
        return ssh_key

    def remove_ssh_key(self, name=None):
        """Delete the key (by *name* or the configured name) if present."""
        ssh_key = self.get_ssh_key()
        if ssh_key:
            self.result['changed'] = True
            args = self._get_common_args()
            args['name'] = name or self.module.params.get('name')
            if not self.module.check_mode:
                self.query_api('deleteSSHKeyPair', **args)
        return ssh_key

    def _get_common_args(self):
        # Scope arguments shared by every SSH key API call.
        return {
            'domainid': self.get_domain('id'),
            'account': self.get_account('name'),
            'projectid': self.get_project('id')
        }

    def get_ssh_key(self):
        """Look up the key, preferring fingerprint match over name match.

        The result is cached in self.ssh_key; reset it to None to force
        a fresh lookup.
        """
        if not self.ssh_key:
            public_key = self.module.params.get('public_key')
            if public_key:
                # Query by fingerprint of the public key
                args_fingerprint = self._get_common_args()
                args_fingerprint['fingerprint'] = self._get_ssh_fingerprint(public_key)
                ssh_keys = self.query_api('listSSHKeyPairs', **args_fingerprint)
                if ssh_keys and 'sshkeypair' in ssh_keys:
                    self.ssh_key = ssh_keys['sshkeypair'][0]
            # When key has not been found by fingerprint, use the name
            if not self.ssh_key:
                args_name = self._get_common_args()
                args_name['name'] = self.module.params.get('name')
                ssh_keys = self.query_api('listSSHKeyPairs', **args_name)
                if ssh_keys and 'sshkeypair' in ssh_keys:
                    self.ssh_key = ssh_keys['sshkeypair'][0]
        return self.ssh_key

    def _get_ssh_fingerprint(self, public_key):
        """Compute the MD5 fingerprint of *public_key* via sshpubkeys.

        Newer sshpubkeys releases expose hash_md5() (prefixed "MD5:");
        older ones only have hash().
        """
        key = sshpubkeys.SSHKey(public_key)
        if hasattr(key, 'hash_md5'):
            return key.hash_md5().replace(to_native('MD5:'), to_native(''))
        return key.hash()
def main():
    """Module entry point: build the argument interface, dispatch on state."""
    spec = cs_argument_spec()
    spec.update(
        name=dict(required=True),
        public_key=dict(),
        domain=dict(),
        account=dict(),
        project=dict(),
        state=dict(choices=['present', 'absent'], default='present'),
    )

    module = AnsibleModule(
        argument_spec=spec,
        required_together=cs_required_together(),
        supports_check_mode=True,
    )

    # Fail early when the optional sshpubkeys dependency is missing.
    if not HAS_LIB_SSHPUBKEYS:
        module.fail_json(msg=missing_required_lib("sshpubkeys"), exception=SSHPUBKEYS_IMP_ERR)

    handler = AnsibleCloudStackSshKey(module)

    if module.params.get('state') == 'absent':
        ssh_key = handler.remove_ssh_key()
    elif module.params.get('public_key'):
        ssh_key = handler.register_ssh_key(module.params.get('public_key'))
    else:
        ssh_key = handler.create_ssh_key()

    module.exit_json(**handler.get_result(ssh_key))


if __name__ == '__main__':
    main()
| gpl-3.0 |
sigma-random/asuswrt-merlin | release/src/router/libevent-2.0.21/event_rpcgen.py | 81 | 55456 | #!/usr/bin/env python2
#
# Copyright (c) 2005-2007 Niels Provos <provos@citi.umich.edu>
# Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
# All rights reserved.
#
# Generates marshaling code based on libevent.
# TODO:
# 1) use optparse to allow the strategy shell to parse options, and
# to allow the instantiated factory (for the specific output language)
# to parse remaining options
# 2) move the globals into a class that manages execution (including the
# progress outputs that space stderr at the moment)
# 3) emit other languages
import sys
import re
_NAME = "event_rpcgen.py"
_VERSION = "0.1"

# Globals
line_count = 0  # current input line number, used in parse error messages

# Lexer helpers for the RPC description language.
white = re.compile(r'\s+')
cppcomment = re.compile(r'\/\/.*$')  # strips C++-style // comments
nonident = re.compile(r'[^a-zA-Z0-9_]')  # any char not legal in a C identifier
structref = re.compile(r'^struct\[([a-zA-Z_][a-zA-Z0-9_]*)\]$')  # struct[Name] field reference
structdef = re.compile(r'^struct +[a-zA-Z_][a-zA-Z0-9_]* *{$')  # struct definition opener

# Verbatim pass-through lines destined for the generated header / C file.
headerdirect = []
cppdirect = []
def TranslateList(mylist, mydict):
    """Return a list with each template string of *mylist* expanded
    against *mydict* using %-interpolation.

    A list comprehension replaces the original map()+lambda: on Python 2
    both produce a list, and the comprehension additionally keeps list
    semantics under Python 3 (where map() is lazy), matching the
    ``code += TranslateList(...)`` call sites.
    """
    return [template % mydict for template in mylist]
# Exception class for parse errors
class RpcGenError(Exception):
    """Signals a fatal problem while parsing an RPC description.

    The offending condition is kept in ``why`` and rendered by ``str()``.
    """

    def __init__(self, why):
        self.why = why

    def __str__(self):
        return str(self.why)
# Holds everything that makes a struct
class Struct:
    """In-memory representation of one parsed struct definition.

    Collects the struct's entries and guards against duplicate tag
    numbers.  Subclasses (e.g. StructCCode) implement code emission.
    """

    def __init__(self, name):
        self._name = name
        self._entries = []
        self._tags = {}  # tag number -> entry name, for duplicate detection

        print >>sys.stderr, ' Created struct: %s' % name

    def AddEntry(self, entry):
        """Append *entry*; raise RpcGenError on a duplicate tag number.

        Uses the ``in`` operator instead of the deprecated dict.has_key()
        (equivalent on Python 2, and has_key() no longer exists on 3).
        """
        if entry.Tag() in self._tags:
            raise RpcGenError(
                'Entry "%s" duplicates tag number %d from "%s" '
                'around line %d' % (entry.Name(), entry.Tag(),
                                    self._tags[entry.Tag()], line_count))
        self._entries.append(entry)
        self._tags[entry.Tag()] = entry.Name()
        print >>sys.stderr, ' Added entry: %s' % entry.Name()

    def Name(self):
        return self._name

    def EntryTagName(self, entry):
        """Creates the name inside an enumeration for distinguishing data
        types."""
        name = "%s_%s" % (self._name, entry.Name())
        return name.upper()

    def PrintIndented(self, file, ident, code):
        """Takes an array, add indentation to each entry and prints it."""
        for entry in code:
            print >>file, '%s%s' % (ident, entry)
class StructCCode(Struct):
    """ Knows how to generate C code for a struct """

    def __init__(self, name):
        Struct.__init__(self, name)

    def PrintTags(self, file):
        """Prints the tag definitions for a structure."""
        print >>file, '/* Tag definition for %s */' % self._name
        print >>file, 'enum %s_ {' % self._name.lower()
        for entry in self._entries:
            print >>file, ' %s=%d,' % (self.EntryTagName(entry),
                                       entry.Tag())
        print >>file, ' %s_MAX_TAGS' % (self._name.upper())
        print >>file, '};\n'

    def PrintForwardDeclaration(self, file):
        # Forward declaration for the public header.
        print >>file, 'struct %s;' % self._name

    def PrintDeclaration(self, file):
        """Emit the struct definition, accessor vtable and prototypes."""
        print >>file, '/* Structure declaration for %s */' % self._name
        print >>file, 'struct %s_access_ {' % self._name
        for entry in self._entries:
            # Per-entry function-pointer slots: assign/get (+ add for arrays).
            dcl = entry.AssignDeclaration('(*%s_assign)' % entry.Name())
            dcl.extend(
                entry.GetDeclaration('(*%s_get)' % entry.Name()))
            if entry.Array():
                dcl.extend(
                    entry.AddDeclaration('(*%s_add)' % entry.Name()))
            self.PrintIndented(file, ' ', dcl)
        print >>file, '};\n'

        print >>file, 'struct %s {' % self._name
        print >>file, ' struct %s_access_ *base;\n' % self._name
        for entry in self._entries:
            dcl = entry.Declaration()
            self.PrintIndented(file, ' ', dcl)
        print >>file, ''
        for entry in self._entries:
            # One "is set" flag per entry, used by _complete()/optional logic.
            print >>file, ' ev_uint8_t %s_set;' % entry.Name()
        print >>file, '};\n'

        print >>file, \
            """struct %(name)s *%(name)s_new(void);
struct %(name)s *%(name)s_new_with_arg(void *);
void %(name)s_free(struct %(name)s *);
void %(name)s_clear(struct %(name)s *);
void %(name)s_marshal(struct evbuffer *, const struct %(name)s *);
int %(name)s_unmarshal(struct %(name)s *, struct evbuffer *);
int %(name)s_complete(struct %(name)s *);
void evtag_marshal_%(name)s(struct evbuffer *, ev_uint32_t,
const struct %(name)s *);
int evtag_unmarshal_%(name)s(struct evbuffer *, ev_uint32_t,
struct %(name)s *);""" % { 'name' : self._name }

        # Write a setting function of every variable
        for entry in self._entries:
            self.PrintIndented(file, '', entry.AssignDeclaration(
                entry.AssignFuncName()))
            self.PrintIndented(file, '', entry.GetDeclaration(
                entry.GetFuncName()))
            if entry.Array():
                self.PrintIndented(file, '', entry.AddDeclaration(
                    entry.AddFuncName()))

        print >>file, '/* --- %s done --- */\n' % self._name

    def PrintCode(self, file):
        """Emit the full .c implementation for this struct."""
        print >>file, ('/*\n'
                       ' * Implementation of %s\n'
                       ' */\n') % self._name

        # Static accessor vtable shared by all instances.
        print >>file, \
            'static struct %(name)s_access_ __%(name)s_base = {' % \
            { 'name' : self._name }
        for entry in self._entries:
            self.PrintIndented(file, ' ', entry.CodeBase())
        print >>file, '};\n'

        # Creation
        print >>file, (
            'struct %(name)s *\n'
            '%(name)s_new(void)\n'
            '{\n'
            ' return %(name)s_new_with_arg(NULL);\n'
            '}\n'
            '\n'
            'struct %(name)s *\n'
            '%(name)s_new_with_arg(void *unused)\n'
            '{\n'
            ' struct %(name)s *tmp;\n'
            ' if ((tmp = malloc(sizeof(struct %(name)s))) == NULL) {\n'
            ' event_warn("%%s: malloc", __func__);\n'
            ' return (NULL);\n'
            ' }\n'
            ' tmp->base = &__%(name)s_base;\n') % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIndented(file, ' ', entry.CodeInitialize('tmp'))
            print >>file, ' tmp->%s_set = 0;\n' % entry.Name()
        print >>file, (
            ' return (tmp);\n'
            '}\n')

        # Adding
        for entry in self._entries:
            if entry.Array():
                self.PrintIndented(file, '', entry.CodeAdd())
            print >>file, ''

        # Assigning
        for entry in self._entries:
            self.PrintIndented(file, '', entry.CodeAssign())
            print >>file, ''

        # Getting
        for entry in self._entries:
            self.PrintIndented(file, '', entry.CodeGet())
            print >>file, ''

        # Clearing
        print >>file, ( 'void\n'
                        '%(name)s_clear(struct %(name)s *tmp)\n'
                        '{'
                        ) % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIndented(file, ' ', entry.CodeClear('tmp'))
        print >>file, '}\n'

        # Freeing
        print >>file, ( 'void\n'
                        '%(name)s_free(struct %(name)s *tmp)\n'
                        '{'
                        ) % { 'name' : self._name }
        for entry in self._entries:
            self.PrintIndented(file, ' ', entry.CodeFree('tmp'))
        print >>file, (' free(tmp);\n'
                       '}\n')

        # Marshaling
        print >>file, ('void\n'
                       '%(name)s_marshal(struct evbuffer *evbuf, '
                       'const struct %(name)s *tmp)'
                       '{') % { 'name' : self._name }
        for entry in self._entries:
            indent = ' '
            # Optional entries do not have to be set
            if entry.Optional():
                indent += ' '
                print >>file, ' if (tmp->%s_set) {' % entry.Name()
            self.PrintIndented(
                file, indent,
                entry.CodeMarshal('evbuf', self.EntryTagName(entry),
                                  entry.GetVarName('tmp'),
                                  entry.GetVarLen('tmp')))
            if entry.Optional():
                print >>file, ' }'

        print >>file, '}\n'

        # Unmarshaling
        print >>file, ('int\n'
                       '%(name)s_unmarshal(struct %(name)s *tmp, '
                       ' struct evbuffer *evbuf)\n'
                       '{\n'
                       ' ev_uint32_t tag;\n'
                       ' while (evbuffer_get_length(evbuf) > 0) {\n'
                       ' if (evtag_peek(evbuf, &tag) == -1)\n'
                       ' return (-1);\n'
                       ' switch (tag) {\n'
                       ) % { 'name' : self._name }
        for entry in self._entries:
            print >>file, ' case %s:\n' % self.EntryTagName(entry)
            if not entry.Array():
                # Non-array entries may appear at most once on the wire.
                print >>file, (
                    ' if (tmp->%s_set)\n'
                    ' return (-1);'
                    ) % (entry.Name())

            self.PrintIndented(
                file, ' ',
                entry.CodeUnmarshal('evbuf',
                                    self.EntryTagName(entry),
                                    entry.GetVarName('tmp'),
                                    entry.GetVarLen('tmp')))

            print >>file, ( ' tmp->%s_set = 1;\n' % entry.Name() +
                            ' break;\n' )
        print >>file, ( ' default:\n'
                        ' return -1;\n'
                        ' }\n'
                        ' }\n' )
        # Check if it was decoded completely
        print >>file, ( ' if (%(name)s_complete(tmp) == -1)\n'
                        ' return (-1);'
                        ) % { 'name' : self._name }

        # Successfully decoded
        print >>file, ( ' return (0);\n'
                        '}\n')

        # Checking if a structure has all the required data
        print >>file, (
            'int\n'
            '%(name)s_complete(struct %(name)s *msg)\n'
            '{' ) % { 'name' : self._name }
        for entry in self._entries:
            if not entry.Optional():
                code = [
                    'if (!msg->%(name)s_set)',
                    ' return (-1);' ]
                code = TranslateList(code, entry.GetTranslation())
                self.PrintIndented(
                    file, ' ', code)
            self.PrintIndented(
                file, ' ',
                entry.CodeComplete('msg', entry.GetVarName('msg')))
        print >>file, (
            ' return (0);\n'
            '}\n' )

        # Complete message unmarshaling
        print >>file, (
            'int\n'
            'evtag_unmarshal_%(name)s(struct evbuffer *evbuf, '
            'ev_uint32_t need_tag, struct %(name)s *msg)\n'
            '{\n'
            ' ev_uint32_t tag;\n'
            ' int res = -1;\n'
            '\n'
            ' struct evbuffer *tmp = evbuffer_new();\n'
            '\n'
            ' if (evtag_unmarshal(evbuf, &tag, tmp) == -1'
            ' || tag != need_tag)\n'
            ' goto error;\n'
            '\n'
            ' if (%(name)s_unmarshal(msg, tmp) == -1)\n'
            ' goto error;\n'
            '\n'
            ' res = 0;\n'
            '\n'
            ' error:\n'
            ' evbuffer_free(tmp);\n'
            ' return (res);\n'
            '}\n' ) % { 'name' : self._name }

        # Complete message marshaling
        print >>file, (
            'void\n'
            'evtag_marshal_%(name)s(struct evbuffer *evbuf, ev_uint32_t tag, '
            'const struct %(name)s *msg)\n'
            '{\n'
            ' struct evbuffer *_buf = evbuffer_new();\n'
            ' assert(_buf != NULL);\n'
            ' %(name)s_marshal(_buf, msg);\n'
            ' evtag_marshal_buffer(evbuf, tag, _buf);\n '
            ' evbuffer_free(_buf);\n'
            '}\n' ) % { 'name' : self._name }
class Entry:
    """Base class for one field of a struct.

    Stores the field's wire type, C name and tag number, and provides the
    default code generation for plain scalar fields.  Subclasses override
    the Code*/Declaration hooks for their specific C representations.
    """

    def __init__(self, type, name, tag):
        self._type = type
        self._name = name
        self._tag = int(tag)
        self._ctype = type          # C type used in generated declarations
        self._optional = 0
        self._can_be_array = 0      # subclasses set to 1 when arrays are legal
        self._array = 0
        self._line_count = -1       # input line number, for error messages
        self._struct = None         # owning Struct; set via SetStruct()
        self._refname = None        # referenced struct name (EntryStruct only)

        # How the generated _add() accessor passes and returns values.
        self._optpointer = True
        self._optaddarg = True

    def GetInitializer(self):
        """C initializer literal for this entry; must be overridden."""
        assert 0, "Entry does not provide initializer"

    def SetStruct(self, struct):
        self._struct = struct

    def LineCount(self):
        assert self._line_count != -1
        return self._line_count

    def SetLineCount(self, number):
        self._line_count = number

    def Array(self):
        return self._array

    def Optional(self):
        return self._optional

    def Tag(self):
        return self._tag

    def Name(self):
        return self._name

    def Type(self):
        return self._type

    def MakeArray(self, yes=1):
        self._array = yes

    def MakeOptional(self):
        self._optional = 1

    def Verify(self):
        """Raise RpcGenError when the entry's flags are inconsistent."""
        if self.Array() and not self._can_be_array:
            raise RpcGenError(
                'Entry "%s" cannot be created as an array '
                'around line %d' % (self._name, self.LineCount()))
        if not self._struct:
            raise RpcGenError(
                'Entry "%s" does not know which struct it belongs to '
                'around line %d' % (self._name, self.LineCount()))
        if self._optional and self._array:
            raise RpcGenError(
                'Entry "%s" has illegal combination of optional and array '
                'around line %d' % (self._name, self.LineCount()))

    def GetTranslation(self, extradict=None):
        """Return the %-template mapping for this entry.

        *extradict* entries extend/override the defaults.  The default
        used to be a shared mutable ``{}`` argument; using ``None`` avoids
        the mutable-default pitfall while keeping the call sites working
        unchanged (the mapping itself was never mutated through it, so
        behavior is identical).
        """
        mapping = {
            "parent_name" : self._struct.Name(),
            "name" : self._name,
            "ctype" : self._ctype,
            "refname" : self._refname,
            "optpointer" : self._optpointer and "*" or "",
            "optreference" : self._optpointer and "&" or "",
            "optaddarg" :
            self._optaddarg and ", const %s value" % self._ctype or ""
        }
        if extradict:
            mapping.update(extradict)
        return mapping

    def GetVarName(self, var):
        return '%(var)s->%(name)s_data' % self.GetTranslation({ 'var' : var })

    def GetVarLen(self, var):
        return 'sizeof(%s)' % self._ctype

    def GetFuncName(self):
        return '%s_%s_get' % (self._struct.Name(), self._name)

    def GetDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, %s *);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def CodeGet(self):
        """Generate the C body of the _get accessor for a scalar entry."""
        code = (
            'int',
            '%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, '
            '%(ctype)s *value)',
            '{',
            ' if (msg->%(name)s_set != 1)',
            ' return (-1);',
            ' *value = msg->%(name)s_data;',
            ' return (0);',
            '}' )
        code = '\n'.join(code)
        code = code % self.GetTranslation()
        return code.split('\n')

    def AssignFuncName(self):
        return '%s_%s_assign' % (self._struct.Name(), self._name)

    def AddFuncName(self):
        return '%s_%s_add' % (self._struct.Name(), self._name)

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, const %s);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def CodeAssign(self):
        """Generate the C body of the _assign accessor for a scalar entry."""
        code = [ 'int',
                 '%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,'
                 ' const %(ctype)s value)',
                 '{',
                 ' msg->%(name)s_set = 1;',
                 ' msg->%(name)s_data = value;',
                 ' return (0);',
                 '}' ]
        code = '\n'.join(code)
        code = code % self.GetTranslation()
        return code.split('\n')

    def CodeClear(self, structname):
        code = [ '%s->%s_set = 0;' % (structname, self.Name()) ]
        return code

    def CodeComplete(self, structname, var_name):
        # Scalar entries need no extra completeness check.
        return []

    def CodeFree(self, name):
        # Scalar entries own no heap memory.
        return []

    def CodeBase(self):
        """Generate this entry's slots in the accessor vtable initializer."""
        code = [
            '%(parent_name)s_%(name)s_assign,',
            '%(parent_name)s_%(name)s_get,'
        ]
        if self.Array():
            code.append('%(parent_name)s_%(name)s_add,')

        code = '\n'.join(code)
        code = code % self.GetTranslation()
        return code.split('\n')
class EntryBytes(Entry):
    """Fixed-length byte-array entry, stored inline as ev_uint8_t[length]."""

    def __init__(self, type, name, tag, length):
        # Init base class
        Entry.__init__(self, type, name, tag)

        self._length = length
        self._ctype = 'ev_uint8_t'

    def GetInitializer(self):
        return "NULL"

    def GetVarLen(self, var):
        # The length is a compile-time constant from the description file.
        return '(%s)' % self._length

    def CodeArrayAdd(self, varname, value):
        # XXX: copy here
        return [ '%(varname)s = NULL;' % { 'varname' : varname } ]

    def GetDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, %s **);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, const %s *);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def Declaration(self):
        dcl = ['ev_uint8_t %s_data[%s];' % (self._name, self._length)]
        return dcl

    def CodeGet(self):
        # Returns a pointer to the inline buffer rather than copying it.
        name = self._name
        code = [ 'int',
                 '%s_%s_get(struct %s *msg, %s **value)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_set != 1)' % name,
                 ' return (-1);',
                 ' *value = msg->%s_data;' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeAssign(self):
        # memcpy's exactly the declared length from the caller's buffer.
        name = self._name
        code = [ 'int',
                 '%s_%s_assign(struct %s *msg, const %s *value)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' msg->%s_set = 1;' % name,
                 ' memcpy(msg->%s_data, value, %s);' % (
                     name, self._length),
                 ' return (0);',
                 '}' ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        code = [ 'if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, '
                 '%(var)s, %(varlen)s) == -1) {',
                 ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
                 ' return (-1);',
                 '}'
                 ]
        return TranslateList(code,
                             self.GetTranslation({
                                 'var' : var_name,
                                 'varlen' : var_len,
                                 'buf' : buf,
                                 'tag' : tag_name }))

    def CodeMarshal(self, buf, tag_name, var_name, var_len):
        code = ['evtag_marshal(%s, %s, %s, %s);' % (
            buf, tag_name, var_name, var_len)]
        return code

    def CodeClear(self, structname):
        # Clearing zeroes the inline buffer as well as the set flag.
        code = [ '%s->%s_set = 0;' % (structname, self.Name()),
                 'memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
                     structname, self._name, structname, self._name)]
        return code

    def CodeInitialize(self, name):
        code = ['memset(%s->%s_data, 0, sizeof(%s->%s_data));' % (
            name, self._name, name, self._name)]
        return code

    def Verify(self):
        """Require a length for fixed-size byte entries, then base checks."""
        if not self._length:
            raise RpcGenError(
                'Entry "%s" needs a length '
                'around line %d' % (self._name, self.LineCount()))

        Entry.Verify(self)
class EntryInt(Entry):
    """Fixed-width unsigned integer entry (32- or 64-bit)."""

    def __init__(self, type, name, tag, bits=32):
        # Init base class
        Entry.__init__(self, type, name, tag)

        self._can_be_array = 1
        if bits == 32:
            self._ctype = 'ev_uint32_t'
            self._marshal_type = 'int'
        elif bits == 64:
            self._ctype = 'ev_uint64_t'
            self._marshal_type = 'int64'
        else:
            # Previously an unsupported width silently left _ctype and
            # _marshal_type unset and failed later with an AttributeError;
            # fail fast with a clear message instead.
            raise ValueError('unsupported integer width: %r' % (bits,))

    def GetInitializer(self):
        return "0"

    def CodeArrayFree(self, var):
        # Integers own no heap memory.
        return []

    def CodeArrayAssign(self, varname, srcvar):
        return [ '%(varname)s = %(srcvar)s;' % { 'varname' : varname,
                                                 'srcvar' : srcvar } ]

    def CodeArrayAdd(self, varname, value):
        """Returns a new entry of this type."""
        return [ '%(varname)s = %(value)s;' % { 'varname' : varname,
                                                'value' : value } ]

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        code = [
            'if (evtag_unmarshal_%(ma)s(%(buf)s, %(tag)s, &%(var)s) == -1) {',
            ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
            ' return (-1);',
            '}' ]
        code = '\n'.join(code) % self.GetTranslation({
            'ma' : self._marshal_type,
            'buf' : buf,
            'tag' : tag_name,
            'var' : var_name })
        return code.split('\n')

    def CodeMarshal(self, buf, tag_name, var_name, var_len):
        code = [
            'evtag_marshal_%s(%s, %s, %s);' % (
                self._marshal_type, buf, tag_name, var_name)]
        return code

    def Declaration(self):
        dcl = ['%s %s_data;' % (self._ctype, self._name)]
        return dcl

    def CodeInitialize(self, name):
        code = ['%s->%s_data = 0;' % (name, self._name)]
        return code
class EntryString(Entry):
    """NUL-terminated string entry, stored as a heap-allocated char *."""

    def __init__(self, type, name, tag):
        # Init base class
        Entry.__init__(self, type, name, tag)

        self._can_be_array = 1
        self._ctype = 'char *'

    def GetInitializer(self):
        return "NULL"

    def CodeArrayFree(self, varname):
        code = [
            'if (%(var)s != NULL) free(%(var)s);' ]

        return TranslateList(code, { 'var' : varname })

    def CodeArrayAssign(self, varname, srcvar):
        # Frees any old value before strdup'ing the source string.
        code = [
            'if (%(var)s != NULL)',
            ' free(%(var)s);',
            '%(var)s = strdup(%(srcvar)s);',
            'if (%(var)s == NULL) {',
            ' event_warnx("%%s: strdup", __func__);',
            ' return (-1);',
            '}' ]

        return TranslateList(code, { 'var' : varname,
                                     'srcvar' : srcvar })

    def CodeArrayAdd(self, varname, value):
        # NULL values are stored as NULL rather than strdup'ed.
        code = [
            'if (%(value)s != NULL) {',
            ' %(var)s = strdup(%(value)s);',
            ' if (%(var)s == NULL) {',
            ' goto error;',
            ' }',
            '} else {',
            ' %(var)s = NULL;',
            '}' ]

        return TranslateList(code, { 'var' : varname,
                                     'value' : value })

    def GetVarLen(self, var):
        # Wire length is the runtime string length, not a constant.
        return 'strlen(%s)' % self.GetVarName(var)

    def CodeMakeInitalize(self, varname):
        return '%(varname)s = NULL;' % { 'varname' : varname }

    def CodeAssign(self):
        name = self._name
        code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
if (msg->%(name)s_data != NULL)
free(msg->%(name)s_data);
if ((msg->%(name)s_data = strdup(value)) == NULL)
return (-1);
msg->%(name)s_set = 1;
return (0);
}""" % self.GetTranslation()

        return code.split('\n')

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        code = ['if (evtag_unmarshal_string(%(buf)s, %(tag)s, &%(var)s) == -1) {',
                ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
                ' return (-1);',
                '}'
                ]
        code = '\n'.join(code) % self.GetTranslation({
            'buf' : buf,
            'tag' : tag_name,
            'var' : var_name })
        return code.split('\n')

    def CodeMarshal(self, buf, tag_name, var_name, var_len):
        code = ['evtag_marshal_string(%s, %s, %s);' % (
            buf, tag_name, var_name)]
        return code

    def CodeClear(self, structname):
        # Only free when the set flag says the pointer is valid.
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' free(%s->%s_data);' % (structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeInitialize(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL)' % (name, self._name),
                ' free (%s->%s_data);' % (name, self._name)]
        return code

    def Declaration(self):
        dcl = ['char *%s_data;' % self._name]
        return dcl
class EntryStruct(Entry):
    """Entry referencing another generated struct (``struct[Name]``).

    Assignment deep-copies the value by marshaling it into a temporary
    evbuffer and unmarshaling it into a freshly cleared destination.
    """

    def __init__(self, type, name, tag, refname):
        # Init base class
        Entry.__init__(self, type, name, tag)

        self._optpointer = False
        self._can_be_array = 1
        self._refname = refname
        self._ctype = 'struct %s*' % refname
        self._optaddarg = False

    def GetInitializer(self):
        return "NULL"

    def GetVarLen(self, var):
        # Length of an embedded struct is not known up front.
        return '-1'

    def CodeArrayAdd(self, varname, value):
        code = [
            '%(varname)s = %(refname)s_new();',
            'if (%(varname)s == NULL)',
            ' goto error;' ]
        return TranslateList(code, self.GetTranslation({ 'varname' : varname }))

    def CodeArrayFree(self, var):
        code = [ '%(refname)s_free(%(var)s);' % self.GetTranslation(
            { 'var' : var }) ]
        return code

    def CodeArrayAssign(self, var, srcvar):
        """Deep-copy assignment via marshal/unmarshal round trip."""
        code = [
            'int had_error = 0;',
            'struct evbuffer *tmp = NULL;',
            '%(refname)s_clear(%(var)s);',
            'if ((tmp = evbuffer_new()) == NULL) {',
            ' event_warn("%%s: evbuffer_new()", __func__);',
            ' had_error = 1;',
            ' goto done;',
            '}',
            '%(refname)s_marshal(tmp, %(srcvar)s);',
            'if (%(refname)s_unmarshal(%(var)s, tmp) == -1) {',
            ' event_warnx("%%s: %(refname)s_unmarshal", __func__);',
            ' had_error = 1;',
            ' goto done;',
            '}',
            # BUG FIX: a missing comma after 'done:' used to fuse it with
            # the next literal, emitting the single line
            # 'done:if (tmp != NULL)'.  Same C semantics (label + stmt),
            # but the label was clearly meant to be on its own line.
            'done:',
            'if (tmp != NULL)',
            ' evbuffer_free(tmp);',
            'if (had_error) {',
            ' %(refname)s_clear(%(var)s);',
            ' return (-1);',
            '}' ]
        return TranslateList(code, self.GetTranslation({
            'var' : var,
            'srcvar' : srcvar}))

    def CodeGet(self):
        # The getter lazily allocates the embedded struct on first access.
        name = self._name
        code = [ 'int',
                 '%s_%s_get(struct %s *msg, %s *value)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_set != 1) {' % name,
                 ' msg->%s_data = %s_new();' % (name, self._refname),
                 ' if (msg->%s_data == NULL)' % name,
                 ' return (-1);',
                 ' msg->%s_set = 1;' % name,
                 ' }',
                 ' *value = msg->%s_data;' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeAssign(self):
        """Generate the C _assign accessor (deep copy with full cleanup)."""
        name = self._name
        code = """int
%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg,
const %(ctype)s value)
{
struct evbuffer *tmp = NULL;
if (msg->%(name)s_set) {
%(refname)s_clear(msg->%(name)s_data);
msg->%(name)s_set = 0;
} else {
msg->%(name)s_data = %(refname)s_new();
if (msg->%(name)s_data == NULL) {
event_warn("%%s: %(refname)s_new()", __func__);
goto error;
}
}
if ((tmp = evbuffer_new()) == NULL) {
event_warn("%%s: evbuffer_new()", __func__);
goto error;
}
%(refname)s_marshal(tmp, value);
if (%(refname)s_unmarshal(msg->%(name)s_data, tmp) == -1) {
event_warnx("%%s: %(refname)s_unmarshal", __func__);
goto error;
}
msg->%(name)s_set = 1;
evbuffer_free(tmp);
return (0);
error:
if (tmp != NULL)
evbuffer_free(tmp);
if (msg->%(name)s_data != NULL) {
%(refname)s_free(msg->%(name)s_data);
msg->%(name)s_data = NULL;
}
return (-1);
}""" % self.GetTranslation()
        return code.split('\n')

    def CodeComplete(self, structname, var_name):
        # Recursively validate the embedded struct when it is set.
        code = [ 'if (%(structname)s->%(name)s_set && '
                 '%(refname)s_complete(%(var)s) == -1)',
                 ' return (-1);' ]
        return TranslateList(code, self.GetTranslation({
            'structname' : structname,
            'var' : var_name }))

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        code = ['%(var)s = %(refname)s_new();',
                'if (%(var)s == NULL)',
                ' return (-1);',
                'if (evtag_unmarshal_%(refname)s(%(buf)s, %(tag)s, '
                '%(var)s) == -1) {',
                ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
                ' return (-1);',
                '}'
                ]
        code = '\n'.join(code) % self.GetTranslation({
            'buf' : buf,
            'tag' : tag_name,
            'var' : var_name })
        return code.split('\n')

    def CodeMarshal(self, buf, tag_name, var_name, var_len):
        code = ['evtag_marshal_%s(%s, %s, %s);' % (
            self._refname, buf, tag_name, var_name)]
        return code

    def CodeClear(self, structname):
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' %s_free(%s->%s_data);' % (
                     self._refname, structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeInitialize(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name)]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL)' % (name, self._name),
                ' %s_free(%s->%s_data);' % (
                    self._refname, name, self._name)]
        return code

    def Declaration(self):
        dcl = ['%s %s_data;' % (self._ctype, self._name)]
        return dcl
class EntryVarBytes(Entry):
    """Variable-length byte-array entry: a heap buffer plus a length field."""

    def __init__(self, type, name, tag):
        # Init base class
        Entry.__init__(self, type, name, tag)

        self._ctype = 'ev_uint8_t *'

    def GetInitializer(self):
        return "NULL"

    def GetVarLen(self, var):
        # Runtime length is tracked in the companion _length field.
        return '%(var)s->%(name)s_length' % self.GetTranslation({ 'var' : var })

    def CodeArrayAdd(self, varname, value):
        # xxx: copy
        return [ '%(varname)s = NULL;' % { 'varname' : varname } ]

    def GetDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, %s *, ev_uint32_t *);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def AssignDeclaration(self, funcname):
        code = [ 'int %s(struct %s *, const %s, ev_uint32_t);' % (
            funcname, self._struct.Name(), self._ctype ) ]
        return code

    def CodeAssign(self):
        # Copies the caller's buffer; the message owns its own allocation.
        name = self._name
        code = [ 'int',
                 '%s_%s_assign(struct %s *msg, '
                 'const %s value, ev_uint32_t len)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_data != NULL)' % name,
                 ' free (msg->%s_data);' % name,
                 ' msg->%s_data = malloc(len);' % name,
                 ' if (msg->%s_data == NULL)' % name,
                 ' return (-1);',
                 ' msg->%s_set = 1;' % name,
                 ' msg->%s_length = len;' % name,
                 ' memcpy(msg->%s_data, value, len);' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeGet(self):
        # Returns the internal pointer and its length without copying.
        name = self._name
        code = [ 'int',
                 '%s_%s_get(struct %s *msg, %s *value, ev_uint32_t *plen)' % (
                     self._struct.Name(), name,
                     self._struct.Name(), self._ctype),
                 '{',
                 ' if (msg->%s_set != 1)' % name,
                 ' return (-1);',
                 ' *value = msg->%s_data;' % name,
                 ' *plen = msg->%s_length;' % name,
                 ' return (0);',
                 '}' ]
        return code

    def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
        code = ['if (evtag_payload_length(%(buf)s, &%(varlen)s) == -1)',
                ' return (-1);',
                # We do not want DoS opportunities
                'if (%(varlen)s > evbuffer_get_length(%(buf)s))',
                ' return (-1);',
                'if ((%(var)s = malloc(%(varlen)s)) == NULL)',
                ' return (-1);',
                'if (evtag_unmarshal_fixed(%(buf)s, %(tag)s, %(var)s, '
                '%(varlen)s) == -1) {',
                ' event_warnx("%%s: failed to unmarshal %(name)s", __func__);',
                ' return (-1);',
                '}'
                ]
        code = '\n'.join(code) % self.GetTranslation({
            'buf' : buf,
            'tag' : tag_name,
            'var' : var_name,
            'varlen' : var_len })
        return code.split('\n')

    def CodeMarshal(self, buf, tag_name, var_name, var_len):
        code = ['evtag_marshal(%s, %s, %s, %s);' % (
            buf, tag_name, var_name, var_len)]
        return code

    def CodeClear(self, structname):
        code = [ 'if (%s->%s_set == 1) {' % (structname, self.Name()),
                 ' free (%s->%s_data);' % (structname, self.Name()),
                 ' %s->%s_data = NULL;' % (structname, self.Name()),
                 ' %s->%s_length = 0;' % (structname, self.Name()),
                 ' %s->%s_set = 0;' % (structname, self.Name()),
                 '}'
                 ]
        return code

    def CodeInitialize(self, name):
        code = ['%s->%s_data = NULL;' % (name, self._name),
                '%s->%s_length = 0;' % (name, self._name) ]
        return code

    def CodeFree(self, name):
        code = ['if (%s->%s_data != NULL)' % (name, self._name),
                ' free(%s->%s_data);' % (name, self._name)]
        return code

    def Declaration(self):
        dcl = ['ev_uint8_t *%s_data;' % self._name,
               'ev_uint32_t %s_length;' % self._name]
        return dcl
class EntryArray(Entry):
def __init__(self, entry):
# Init base class
Entry.__init__(self, entry._type, entry._name, entry._tag)
self._entry = entry
self._refname = entry._refname
self._ctype = self._entry._ctype
self._optional = True
self._optpointer = self._entry._optpointer
self._optaddarg = self._entry._optaddarg
# provide a new function for accessing the variable name
def GetVarName(var_name):
return '%(var)s->%(name)s_data[%(index)s]' % \
self._entry.GetTranslation({'var' : var_name,
'index' : self._index})
self._entry.GetVarName = GetVarName
def GetInitializer(self):
return "NULL"
def GetVarName(self, var_name):
return var_name
def GetVarLen(self, var_name):
return '-1'
def GetDeclaration(self, funcname):
"""Allows direct access to elements of the array."""
code = [
'int %(funcname)s(struct %(parent_name)s *, int, %(ctype)s *);' %
self.GetTranslation({ 'funcname' : funcname }) ]
return code
def AssignDeclaration(self, funcname):
code = [ 'int %s(struct %s *, int, const %s);' % (
funcname, self._struct.Name(), self._ctype ) ]
return code
def AddDeclaration(self, funcname):
code = [
'%(ctype)s %(optpointer)s '
'%(funcname)s(struct %(parent_name)s *msg%(optaddarg)s);' % \
self.GetTranslation({ 'funcname' : funcname }) ]
return code
def CodeGet(self):
code = """int
%(parent_name)s_%(name)s_get(struct %(parent_name)s *msg, int offset,
%(ctype)s *value)
{
if (!msg->%(name)s_set || offset < 0 || offset >= msg->%(name)s_length)
return (-1);
*value = msg->%(name)s_data[offset];
return (0);
}""" % self.GetTranslation()
return code.split('\n')
def CodeAssign(self):
code = [
'int',
'%(parent_name)s_%(name)s_assign(struct %(parent_name)s *msg, int off,',
' const %(ctype)s value)',
'{',
' if (!msg->%(name)s_set || off < 0 || off >= msg->%(name)s_length)',
' return (-1);\n',
' {' ]
code = TranslateList(code, self.GetTranslation())
codearrayassign = self._entry.CodeArrayAssign(
'msg->%(name)s_data[off]' % self.GetTranslation(), 'value')
code += map(lambda x: ' ' + x, codearrayassign)
code += TranslateList([
' }',
' return (0);',
'}' ], self.GetTranslation())
return code
def CodeAdd(self):
codearrayadd = self._entry.CodeArrayAdd(
'msg->%(name)s_data[msg->%(name)s_length - 1]' % self.GetTranslation(),
'value')
code = [
'static int',
'%(parent_name)s_%(name)s_expand_to_hold_more('
'struct %(parent_name)s *msg)',
'{',
' int tobe_allocated = msg->%(name)s_num_allocated;',
' %(ctype)s* new_data = NULL;',
' tobe_allocated = !tobe_allocated ? 1 : tobe_allocated << 1;',
' new_data = (%(ctype)s*) realloc(msg->%(name)s_data,',
' tobe_allocated * sizeof(%(ctype)s));',
' if (new_data == NULL)',
' return -1;',
' msg->%(name)s_data = new_data;',
' msg->%(name)s_num_allocated = tobe_allocated;',
' return 0;'
'}',
'',
'%(ctype)s %(optpointer)s',
'%(parent_name)s_%(name)s_add('
'struct %(parent_name)s *msg%(optaddarg)s)',
'{',
' if (++msg->%(name)s_length >= msg->%(name)s_num_allocated) {',
' if (%(parent_name)s_%(name)s_expand_to_hold_more(msg)<0)',
' goto error;',
' }' ]
code = TranslateList(code, self.GetTranslation())
code += map(lambda x: ' ' + x, codearrayadd)
code += TranslateList([
' msg->%(name)s_set = 1;',
' return %(optreference)s(msg->%(name)s_data['
'msg->%(name)s_length - 1]);',
'error:',
' --msg->%(name)s_length;',
' return (NULL);',
'}' ], self.GetTranslation())
return code
    def CodeComplete(self, structname, var_name):
        """Emit a C loop validating every element of the array.

        Returns [] when the wrapped entry has no per-element checks, so the
        caller can skip generating an empty loop.
        """
        self._index = 'i'  # element accessor indexes with the loop variable
        tmp = self._entry.CodeComplete(structname, self._entry.GetVarName(var_name))
        # skip the whole loop if there is nothing to check
        if not tmp:
            return []
        translate = self.GetTranslation({ 'structname' : structname })
        code = [
            '{',
            '  int i;',
            '  for (i = 0; i < %(structname)s->%(name)s_length; ++i) {' ]
        code = TranslateList(code, translate)
        code += map(lambda x: '    ' + x, tmp)
        code += [
            '  }',
            '}' ]
        return code
def CodeUnmarshal(self, buf, tag_name, var_name, var_len):
translate = self.GetTranslation({ 'var' : var_name,
'buf' : buf,
'tag' : tag_name,
'init' : self._entry.GetInitializer()})
code = [
'if (%(var)s->%(name)s_length >= %(var)s->%(name)s_num_allocated &&',
' %(parent_name)s_%(name)s_expand_to_hold_more(%(var)s) < 0) {',
' puts("HEY NOW");',
' return (-1);',
'}']
# the unmarshal code directly returns
code = TranslateList(code, translate)
self._index = '%(var)s->%(name)s_length' % translate
code += self._entry.CodeUnmarshal(buf, tag_name,
self._entry.GetVarName(var_name),
self._entry.GetVarLen(var_name))
code += [ '++%(var)s->%(name)s_length;' % translate ]
return code
    def CodeMarshal(self, buf, tag_name, var_name, var_len):
        """Emit a C loop that marshals every array element into *buf*."""
        code = ['{',
                '  int i;',
                '  for (i = 0; i < %(var)s->%(name)s_length; ++i) {' ]
        self._index = 'i'  # element accessor indexes with the loop variable
        code += self._entry.CodeMarshal(buf, tag_name,
                                        self._entry.GetVarName(var_name),
                                        self._entry.GetVarLen(var_name))
        code += ['  }',
                 '}'
                 ]
        # Apply the %-translation once over the whole joined template, then
        # split back into lines for the caller.
        code = "\n".join(code) % self.GetTranslation({ 'var' : var_name })
        return code.split('\n')
    def CodeClear(self, structname):
        """Emit C code that frees all elements and resets the array fields."""
        translate = self.GetTranslation({ 'structname' : structname })
        codearrayfree = self._entry.CodeArrayFree(
            '%(structname)s->%(name)s_data[i]' % self.GetTranslation(
                { 'structname' : structname } ))
        code = [ 'if (%(structname)s->%(name)s_set == 1) {' ]
        # Only generate the per-element loop when elements own resources
        # (the wrapped entry returned free code).
        if codearrayfree:
            code += [
                '  int i;',
                '  for (i = 0; i < %(structname)s->%(name)s_length; ++i) {' ]
        code = TranslateList(code, translate)
        if codearrayfree:
            code += map(lambda x: '    ' + x, codearrayfree)
            code += [
                '  }' ]
        code += TranslateList([
            '  free(%(structname)s->%(name)s_data);',
            '  %(structname)s->%(name)s_data = NULL;',
            '  %(structname)s->%(name)s_set = 0;',
            '  %(structname)s->%(name)s_length = 0;',
            '  %(structname)s->%(name)s_num_allocated = 0;',
            '}'
            ], translate)
        return code
def CodeInitialize(self, name):
code = ['%s->%s_data = NULL;' % (name, self._name),
'%s->%s_length = 0;' % (name, self._name),
'%s->%s_num_allocated = 0;' % (name, self._name)]
return code
def CodeFree(self, structname):
code = self.CodeClear(structname);
code += TranslateList([
'free(%(structname)s->%(name)s_data);' ],
self.GetTranslation({'structname' : structname }))
return code
def Declaration(self):
dcl = ['%s *%s_data;' % (self._ctype, self._name),
'int %s_length;' % self._name,
'int %s_num_allocated;' % self._name ]
return dcl
def NormalizeLine(line):
    """Strip C++-style comments from *line* and canonicalize whitespace."""
    global white
    global cppcomment
    # ``cppcomment`` and ``white`` are module-level compiled regexes.
    without_comment = cppcomment.sub('', line)
    return white.sub(' ', without_comment.strip())
def ProcessOneEntry(factory, newstruct, entry):
    """Parse one normalized 'type name = tag' member line into an entry.

    Grammar (tokens separated by single spaces):
        [optional] [array] <type> <name>[<len>] = <tag>
    The created entry is appended to *newstruct*.  Raises RpcGenError on
    any malformed member.

    NOTE(review): the returned ``structs`` list is always empty here —
    presumably a hook for nested struct definitions; verify with callers.
    """
    optional = 0
    array = 0
    entry_type = ''
    name = ''
    tag = ''
    tag_set = None
    separator = ''
    fixed_length = ''

    tokens = entry.split(' ')
    while tokens:
        token = tokens[0]
        tokens = tokens[1:]

        # Leading modifiers are only legal before the type token.
        if not entry_type:
            if not optional and token == 'optional':
                optional = 1
                continue

            if not array and token == 'array':
                array = 1
                continue

        if not entry_type:
            entry_type = token
            continue

        if not name:
            # Name with an optional fixed-length suffix, e.g. "buf[24]".
            res = re.match(r'^([^\[\]]+)(\[.*\])?$', token)
            if not res:
                raise RpcGenError(
                    'Cannot parse name: \"%s\" '
                    'around line %d' % (entry, line_count))
            name = res.group(1)
            fixed_length = res.group(2)
            if fixed_length:
                fixed_length = fixed_length[1:-1]  # strip the brackets
            continue

        if not separator:
            separator = token
            if separator != '=':
                raise RpcGenError('Expected "=" after name \"%s\" got %s'
                                  % (name, token))
            continue

        if not tag_set:
            tag_set = 1
            # Tag may be decimal or hex; int(token, 0) accepts both.
            if not re.match(r'^(0x)?[0-9]+$', token):
                raise RpcGenError('Expected tag number: \"%s\"' % entry)
            tag = int(token, 0)
            continue

        raise RpcGenError('Cannot parse \"%s\"' % entry)

    if not tag_set:
        raise RpcGenError('Need tag number: \"%s\"' % entry)

    # Create the right entry
    if entry_type == 'bytes':
        if fixed_length:
            newentry = factory.EntryBytes(entry_type, name, tag, fixed_length)
        else:
            newentry = factory.EntryVarBytes(entry_type, name, tag)
    elif entry_type == 'int' and not fixed_length:
        newentry = factory.EntryInt(entry_type, name, tag)
    elif entry_type == 'int64' and not fixed_length:
        newentry = factory.EntryInt(entry_type, name, tag, bits=64)
    elif entry_type == 'string' and not fixed_length:
        newentry = factory.EntryString(entry_type, name, tag)
    else:
        res = structref.match(entry_type)
        if res:
            # References another struct defined in our file
            newentry = factory.EntryStruct(entry_type, name, tag, res.group(1))
        else:
            raise RpcGenError('Bad type: "%s" in "%s"' % (entry_type, entry))

    structs = []

    if optional:
        newentry.MakeOptional()
    if array:
        newentry.MakeArray()

    newentry.SetStruct(newstruct)
    newentry.SetLineCount(line_count)
    newentry.Verify()

    if array:
        # We need to encapsulate this entry into a struct
        # NOTE(review): ``newname`` is computed but never used.
        newname = newentry.Name()+ '_array'

        # Now borgify the new entry.
        newentry = factory.EntryArray(newentry)
        newentry.SetStruct(newstruct)
        newentry.SetLineCount(line_count)
        newentry.MakeArray()

    newstruct.AddEntry(newentry)

    return structs
def ProcessStruct(factory, data):
    """Parse one normalized 'struct name { ... }' blob into struct objects."""
    tokens = data.split(' ')

    # First three tokens are: 'struct' 'name' '{'
    newstruct = factory.Struct(tokens[1])
    body = ' '.join(tokens[3:-1])

    structs = []
    for raw_member in body.split(';'):
        member = NormalizeLine(raw_member)
        if member:
            # It's possible that new structs get defined in here
            structs.extend(ProcessOneEntry(factory, newstruct, member))

    structs.append(newstruct)
    return structs
def GetNextStruct(file):
    """Read *file* up to the next complete 'struct ... { ... }' definition.

    Strips C and C++ comments, records #include/#if/#endif lines into the
    module-global ``cppdirect`` and #define lines into ``headerdirect``,
    and returns the struct text flattened onto one string ('' at EOF).
    Raises RpcGenError on stray input or trailing garbage.
    """
    global line_count
    global cppdirect

    got_struct = 0
    # NOTE(review): ``processed_lines`` is never used below.
    processed_lines = []

    have_c_comment = 0
    data = ''
    while 1:
        line = file.readline()
        if not line:
            break
        line_count += 1
        line = line[:-1]  # drop the trailing newline

        # Handle /* ... */ comments, possibly spanning multiple lines.
        if not have_c_comment and re.search(r'/\*', line):
            if re.search(r'/\*.*?\*/', line):
                line = re.sub(r'/\*.*?\*/', '', line)
            else:
                line = re.sub(r'/\*.*$', '', line)
                have_c_comment = 1

        if have_c_comment:
            if not re.search(r'\*/', line):
                continue
            have_c_comment = 0
            line = re.sub(r'^.*\*/', '', line)

        line = NormalizeLine(line)

        if not line:
            continue

        if not got_struct:
            # Outside a struct: only preprocessor lines or a struct header
            # are legal.
            if re.match(r'#include ["<].*[>"]', line):
                cppdirect.append(line)
                continue

            if re.match(r'^#(if( |def)|endif)', line):
                cppdirect.append(line)
                continue

            if re.match(r'^#define', line):
                headerdirect.append(line)
                continue

            if not structdef.match(line):
                raise RpcGenError('Missing struct on line %d: %s'
                                  % (line_count, line))
            else:
                got_struct = 1
                data += line
            continue

        # We are inside the struct
        tokens = line.split('}')
        if len(tokens) == 1:
            data += ' ' + line
            continue

        if len(tokens[1]):
            raise RpcGenError('Trailing garbage after struct on line %d'
                              % line_count)

        # We found the end of the struct
        data += ' %s}' % tokens[0]
        break

    # Remove any comments, that might be in there
    data = re.sub(r'/\*.*\*/', '', data)

    return data
def Parse(factory, file):
    """
    Parses the input file and returns C code and corresponding header file.
    """
    entities = []
    while True:
        # Just gets the whole struct nicely formatted
        data = GetNextStruct(file)
        if not data:
            return entities
        entities.extend(ProcessStruct(factory, data))
class CCodeGenerator:
    """Factory that hands out the C-emitting Struct/Entry classes and
    computes the generated file names and boilerplate text."""

    def __init__(self):
        pass

    def GuardName(self, name):
        # Use the complete provided path to the input file, with all
        # non-identifier characters replaced with underscores, to
        # reduce the chance of a collision between guard macros.
        return '_' + nonident.sub('_', name).upper() + '_'

    def HeaderPreamble(self, name):
        """Return the header banner, include guard, captured #define lines
        and the base event2 includes."""
        guard = self.GuardName(name)
        pre = (
            '/*\n'
            ' * Automatically generated from %s\n'
            ' */\n\n'
            '#ifndef %s\n'
            '#define %s\n\n' ) % (
            name, guard, guard)

        for statement in headerdirect:
            pre += '%s\n' % statement
        if headerdirect:
            pre += '\n'

        pre += (
            '#include <event2/util.h> /* for ev_uint*_t */\n'
            '#include <event2/rpc.h>\n'
        )

        return pre

    def HeaderPostamble(self, name):
        """Close the include guard opened by HeaderPreamble()."""
        guard = self.GuardName(name)
        return '#endif /* %s */' % guard

    def BodyPreamble(self, name, header_file):
        """Return the generated .c file's banner, standard includes and the
        captured preprocessor lines."""
        global _NAME
        global _VERSION

        # Only the basename of the header is #included.
        slash = header_file.rfind('/')
        if slash != -1:
            header_file = header_file[slash+1:]

        pre = ( '/*\n'
                ' * Automatically generated from %s\n'
                ' * by %s/%s. DO NOT EDIT THIS FILE.\n'
                ' */\n\n' ) % (name, _NAME, _VERSION)
        pre += ( '#include <stdlib.h>\n'
                 '#include <string.h>\n'
                 '#include <assert.h>\n'
                 '#include <event2/event-config.h>\n'
                 '#include <event2/event.h>\n'
                 '#include <event2/buffer.h>\n'
                 '#include <event2/tag.h>\n\n'
                 '#ifdef _EVENT___func__\n'
                 '#define __func__ _EVENT___func__\n'
                 '#endif\n\n'
                 )

        for statement in cppdirect:
            pre += '%s\n' % statement

        pre += '\n#include "%s"\n\n' % header_file

        pre += 'void event_warn(const char *fmt, ...);\n'
        pre += 'void event_warnx(const char *fmt, ...);\n\n'

        return pre

    def HeaderFilename(self, filename):
        # foo.rpc -> foo.h
        return '.'.join(filename.split('.')[:-1]) + '.h'

    def CodeFilename(self, filename):
        # foo.rpc -> foo.gen.c
        return '.'.join(filename.split('.')[:-1]) + '.gen.c'

    # Factory methods: one per entry kind supported by the .rpc grammar.

    def Struct(self, name):
        return StructCCode(name)

    def EntryBytes(self, entry_type, name, tag, fixed_length):
        return EntryBytes(entry_type, name, tag, fixed_length)

    def EntryVarBytes(self, entry_type, name, tag):
        return EntryVarBytes(entry_type, name, tag)

    def EntryInt(self, entry_type, name, tag, bits=32):
        return EntryInt(entry_type, name, tag, bits)

    def EntryString(self, entry_type, name, tag):
        return EntryString(entry_type, name, tag)

    def EntryStruct(self, entry_type, name, tag, struct_name):
        return EntryStruct(entry_type, name, tag, struct_name)

    def EntryArray(self, entry):
        return EntryArray(entry)
class Usage(RpcGenError):
    """Raised when event_rpcgen is invoked with bad command-line arguments."""
    def __init__(self, argv0):
        # BUG FIX: pass ``self`` explicitly.  The original unbound call
        # omitted it, so the usage string was bound as ``self`` and raising
        # Usage itself blew up with a TypeError.
        RpcGenError.__init__(self, "usage: %s input.rpc [[output.h] output.c]"
                             % argv0)
class CommandLine:
    def __init__(self, argv):
        """Initialize a command-line to launch event_rpcgen, as if
           from a command-line with CommandLine(sys.argv).  If you're
           calling this directly, remember to provide a dummy value
           for sys.argv[0]
        """
        self.filename = None      # input .rpc file
        self.header_file = None   # generated .h output
        self.impl_file = None     # generated .c output
        self.factory = CCodeGenerator()

        if len(argv) < 2 or len(argv) > 4:
            raise Usage(argv[0])

        # Normalize Windows path separators.
        self.filename = argv[1].replace('\\', '/')
        if len(argv) == 3:
            self.impl_file = argv[2].replace('\\', '/')
        if len(argv) == 4:
            self.header_file = argv[2].replace('\\', '/')
            self.impl_file = argv[3].replace('\\', '/')

        if not self.filename:
            raise Usage(argv[0])

        # Derive missing output names from the input name.
        if not self.impl_file:
            self.impl_file = self.factory.CodeFilename(self.filename)

        if not self.header_file:
            self.header_file = self.factory.HeaderFilename(self.impl_file)

        if not self.impl_file.endswith('.c'):
            raise RpcGenError("can only generate C implementation files")
        if not self.header_file.endswith('.h'):
            raise RpcGenError("can only generate C header files")

    def run(self):
        """Parse the input and write the header and implementation files."""
        filename = self.filename
        header_file = self.header_file
        impl_file = self.impl_file
        factory = self.factory

        print >>sys.stderr, 'Reading \"%s\"' % filename

        fp = open(filename, 'r')
        entities = Parse(factory, fp)
        fp.close()

        print >>sys.stderr, '... creating "%s"' % header_file
        header_fp = open(header_file, 'w')
        print >>header_fp, factory.HeaderPreamble(filename)

        # Create forward declarations: allows other structs to reference
        # each other
        for entry in entities:
            entry.PrintForwardDeclaration(header_fp)
        print >>header_fp, ''

        for entry in entities:
            entry.PrintTags(header_fp)
            entry.PrintDeclaration(header_fp)
        print >>header_fp, factory.HeaderPostamble(filename)
        header_fp.close()

        print >>sys.stderr, '... creating "%s"' % impl_file
        impl_fp = open(impl_file, 'w')
        print >>impl_fp, factory.BodyPreamble(filename, header_file)
        for entry in entities:
            entry.PrintCode(impl_fp)
        impl_fp.close()
if __name__ == '__main__':
    try:
        CommandLine(sys.argv).run()
        sys.exit(0)

    except RpcGenError, e:
        # Our own parse/usage errors: report and fail.
        print >>sys.stderr, e
        sys.exit(1)

    except EnvironmentError, e:
        # OS-level errors (unreadable input, unwritable output).
        if e.filename and e.strerror:
            print >>sys.stderr, "%s: %s" % (e.filename, e.strerror)
            sys.exit(1)
        elif e.strerror:
            print >> sys.stderr, e.strerror
            sys.exit(1)
        else:
            # No useful message attached: let the traceback surface.
            raise
| gpl-2.0 |
technosaurus/samba4-GPL2 | webapps/qooxdoo-0.6.5-sdk/frontend/framework/tool/modules/ElementPath.py | 128 | 5954 | #
# ElementTree
# $Id: ElementPath.py 1858 2004-06-17 21:31:41Z Fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2004 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
# Tokenizer for the supported XPath subset.  findall yields (op, tag) pairs;
# exactly one of the two groups is non-empty for each token.
# BUG FIX: made the pattern a raw string.  The original plain string relied
# on Python passing unrecognized escapes like "\(" through unchanged, which
# raises DeprecationWarning (SyntaxWarning in modern CPython).
xpath_tokenizer = re.compile(
    r"(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
    ).findall
class xpath_descendant_or_self:
    """Marker token representing a '//' (descendant-or-self) path step."""
    pass
##
# Wrapper for a compiled XPath.
class Path:
    """A compiled relative XPath expression ('tag/tag/...' subset only)."""

    ##
    # Create an Path instance from an XPath expression.

    def __init__(self, path):
        tokens = xpath_tokenizer(path)
        # the current version supports 'path/path'-style expressions only
        self.path = []
        self.tag = None  # set below iff the path is a single plain tag
        if tokens and tokens[0][0] == "/":
            raise SyntaxError("cannot use absolute path on element")
        while tokens:
            op, tag = tokens.pop(0)
            if tag or op == "*":
                self.path.append(tag or op)
            elif op == ".":
                pass  # '.' (self) is a no-op step
            elif op == "/":
                # a '/' in step position means we just saw '//'
                self.path.append(xpath_descendant_or_self())
                continue
            else:
                raise SyntaxError("unsupported path syntax (%s)" % op)
            if tokens:
                # every step must be followed by a separator
                op, tag = tokens.pop(0)
                if op != "/":
                    raise SyntaxError(
                        "expected path separator (%s)" % (op or tag)
                        )
        if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
            raise SyntaxError("path cannot end with //")
        if len(self.path) == 1 and isinstance(self.path[0], type("")):
            self.tag = self.path[0]

    ##
    # Find first matching object.

    def find(self, element):
        """Return the first subelement matching this path, or None."""
        tag = self.tag
        if tag is None:
            # General case: compute the full node set and take its head.
            nodeset = self.findall(element)
            if not nodeset:
                return None
            return nodeset[0]
        # Fast path for a single plain tag: scan direct children only.
        for elem in element:
            if elem.tag == tag:
                return elem
        return None

    ##
    # Find text for first matching object.

    def findtext(self, element, default=None):
        """Return the .text of the first match ('' if empty), else *default*."""
        tag = self.tag
        if tag is None:
            nodeset = self.findall(element)
            if not nodeset:
                return default
            return nodeset[0].text or ""
        for elem in element:
            if elem.tag == tag:
                return elem.text or ""
        return default

    ##
    # Find all matching objects.

    def findall(self, element):
        """Return a list of all subelements matching this path."""
        nodeset = [element]
        index = 0
        while 1:
            try:
                path = self.path[index]
                index = index + 1
            except IndexError:
                return nodeset  # all steps consumed: done
            # NOTE(review): ``set`` shadows the builtin; kept as-is.
            set = []
            if isinstance(path, xpath_descendant_or_self):
                # '//' step: expand to all descendants (optionally filtered
                # by the tag that follows the '//').
                try:
                    tag = self.path[index]
                    if not isinstance(tag, type("")):
                        tag = None
                    else:
                        index = index + 1
                except IndexError:
                    tag = None # invalid path
                for node in nodeset:
                    new = list(node.getiterator(tag))
                    if new and new[0] is node:
                        set.extend(new[1:])  # exclude the node itself
                    else:
                        set.extend(new)
            else:
                # Plain step: keep matching direct children.
                for node in nodeset:
                    for node in node:
                        if path == "*" or node.tag == path:
                            set.append(node)
            if not set:
                return []
            nodeset = set
_cache = {}
##
# (Internal) Compile path.
def _compile(path):
    """Return a cached compiled Path, clearing the cache when it fills up."""
    cached = _cache.get(path)
    if cached is not None:
        return cached
    compiled = Path(path)
    if len(_cache) >= 100:
        _cache.clear()
    _cache[path] = compiled
    return compiled
##
# Find first matching object.
def find(element, path):
    """Return the first subelement of *element* matching *path*, or None."""
    return _compile(path).find(element)
##
# Find text for first matching object.
def findtext(element, path, default=None):
    """Return the text of the first match of *path*, or *default*."""
    return _compile(path).findtext(element, default)
##
# Find all matching objects.
def findall(element, path):
    """Return a list of all subelements of *element* matching *path*."""
    return _compile(path).findall(element)
| gpl-2.0 |
jaywink/social-federation | federation/entities/diaspora/utils.py | 2 | 1742 | from dateutil.tz import tzlocal, tzutc
from lxml import etree
def ensure_timezone(dt, tz=None):
    """
    Make sure the datetime <dt> has a timezone set, using timezone <tz> if it
    doesn't. <tz> defaults to the local timezone.
    """
    if dt.tzinfo is not None:
        return dt
    return dt.replace(tzinfo=tz or tzlocal())
def format_dt(dt):
    """
    Format a datetime in the way that D* nodes expect.
    """
    utc_dt = ensure_timezone(dt).astimezone(tzutc())
    return utc_dt.strftime('%Y-%m-%dT%H:%M:%SZ')
def struct_to_xml(node, struct):
    """
    Turn a list of dicts into XML nodes with tag names taken from the dict
    keys and element text taken from dict values. This is a list of dicts
    so that the XML nodes can be ordered in the XML output.
    """
    for item in struct:
        for tag, text in item.items():
            etree.SubElement(node, tag).text = text
def get_full_xml_representation(entity, private_key):
    """Get full XML representation of an entity.

    This contains the <XML><post>..</post></XML> wrapper.

    Accepts either a Base entity or a Diaspora entity.

    Author `private_key` must be given so that certain entities can be signed.
    """
    from federation.entities.diaspora.mappers import get_outbound_entity
    outbound = get_outbound_entity(entity, private_key)
    rendered = etree.tostring(outbound.to_xml()).decode("utf-8")
    return "<XML><post>%s</post></XML>" % rendered
def add_element_to_doc(doc, tag, value):
    """Set text value of an etree.Element of tag, appending a new element with given tag if it doesn't exist."""
    existing = doc.find(".//%s" % tag)
    if existing is None:
        existing = etree.SubElement(doc, tag)
    existing.text = value
| bsd-3-clause |
testbetta/repo-pub | subcmds/overview.py | 83 | 2727 | #
# Copyright (C) 2012 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from color import Coloring
from command import PagedCommand
class Overview(PagedCommand):
    common = True
    helpSummary = "Display overview of unmerged project branches"
    helpUsage = """
%prog [--current-branch] [<project>...]
"""
    helpDescription = """
The '%prog' command is used to display an overview of the projects branches,
and list any local commits that have not yet been merged into the project.

The -b/--current-branch option can be used to restrict the output to only
branches currently checked out in each project.  By default, all branches
are displayed.
"""

    def _Options(self, p):
        p.add_option('-b', '--current-branch',
                     dest="current_branch", action="store_true",
                     help="Consider only checked out branches")

    def Execute(self, opt, args):
        """Collect uploadable branches per project and print an overview."""
        all_branches = []
        for project in self.GetProjects(args):
            # Keep only branches that have something to upload.
            br = [project.GetUploadableBranch(x)
                  for x in project.GetBranches()]
            br = [x for x in br if x]
            if opt.current_branch:
                br = [x for x in br if x.name == project.CurrentBranch]
            all_branches.extend(br)

        if not all_branches:
            return

        class Report(Coloring):
            # Thin Coloring wrapper providing bold project headers.
            def __init__(self, config):
                Coloring.__init__(self, config, 'status')
                self.project = self.printer('header', attr='bold')
                self.text = self.printer('text')

        out = Report(all_branches[0].project.config)
        out.text("Deprecated. See repo info -o.")
        out.nl()
        out.project('Projects Overview')
        out.nl()

        # Print a header line whenever the project changes.
        project = None

        for branch in all_branches:
            if project != branch.project:
                project = branch.project
                out.nl()
                out.project('project %s/' % project.relpath)
                out.nl()

            commits = branch.commits
            date = branch.date
            # '*' marks the branch currently checked out in the project.
            print('%s %-33s (%2d commit%s, %s)' % (
                branch.name == project.CurrentBranch and '*' or ' ',
                branch.name,
                len(commits),
                len(commits) != 1 and 's' or ' ',
                date))
            for commit in commits:
                print('%-35s - %s' % ('', commit))
| apache-2.0 |
FighterLYL/autograd | autograd/util.py | 4 | 4852 | from __future__ import absolute_import
from __future__ import print_function
import autograd.numpy as np
import itertools as it
from autograd.core import grad, safe_type
from copy import copy
from autograd.numpy.use_gpu_numpy import use_gpu_numpy
from autograd.container_types import ListNode, TupleNode
import six
from six.moves import map
from six.moves import range
from six.moves import zip
if use_gpu_numpy():
garray_obj = np.garray
array_types = (np.ndarray, garray_obj)
EPS, RTOL, ATOL = 1e-4, 1e-2, 1e-2
else:
garray_obj = ()
array_types = (np.ndarray,)
EPS, RTOL, ATOL = 1e-4, 1e-4, 1e-6
def nd(f, *args):
    """Numerically differentiate *f* with respect to its full argument tuple."""
    return unary_nd(lambda packed_args: f(*packed_args), args)
def unary_nd(f, x, eps=EPS):
    """Numerically differentiate single-argument *f* at *x*.

    Recurses structurally through arrays, tuples, dicts and lists so the
    result mirrors the container shape of *x*.  Complex inputs get steps
    along both the real and imaginary axes.
    """
    if isinstance(x, array_types):
        if np.iscomplexobj(x):
            nd_grad = np.zeros(x.shape) + 0j
        elif isinstance(x, garray_obj):
            # GPU arrays need a GPU-typed accumulator.
            nd_grad = np.array(np.zeros(x.shape), dtype=np.gpu_float32)
        else:
            nd_grad = np.zeros(x.shape)
        # One central difference per array element.
        for dims in it.product(*list(map(range, x.shape))):
            nd_grad[dims] = unary_nd(indexed_function(f, x, dims), x[dims])
        return nd_grad
    elif isinstance(x, tuple):
        return tuple([unary_nd(indexed_function(f, tuple(x), i), x[i])
                      for i in range(len(x))])
    elif isinstance(x, dict):
        return {k : unary_nd(indexed_function(f, x, k), v) for k, v in six.iteritems(x)}
    elif isinstance(x, list):
        return [unary_nd(indexed_function(f, x, i), v) for i, v in enumerate(x)]
    elif np.iscomplexobj(x):
        # Central differences along the real and imaginary directions.
        result = (f(x + eps/2) - f(x - eps/2)) / eps \
            - 1j*(f(x + 1j*eps/2) - f(x - 1j*eps/2)) / eps
        return type(safe_type(x))(result)
    else:
        return type(safe_type(x))((f(x + eps/2) - f(x - eps/2)) / eps)
def indexed_function(fun, arg, index):
    """Wrap *fun* so its single input replaces ``arg[index]``.

    The returned callable copies *arg*, substitutes the supplied value at
    *index* (rebuilding tuples/lists, mutating the shallow copy otherwise)
    and applies *fun* to the modified container.
    """
    def substituted(x):
        container = copy(arg)
        if isinstance(container, tuple):
            container = container[:index] + (x,) + container[index + 1:]
        elif isinstance(container, list):
            container = container[:index] + [x] + container[index + 1:]
        else:
            container[index] = x
        return fun(container)
    return substituted
def check_equivalent(A, B, rtol=RTOL, atol=ATOL):
    """Recursively assert that A and B match in type, structure, and
    (approximately) numeric value."""
    # base_class collapses equivalent numeric types (np.float32 vs float).
    assert base_class(type(A)) is base_class(type(B)),\
        "Types are: {0} and {1}".format(type(A), type(B))
    if isinstance(A, (tuple, list)):
        for a, b in zip(A, B): check_equivalent(a, b)
    elif isinstance(A, dict):
        assert len(A) == len(B)
        for k in A: check_equivalent(A[k], B[k])
    else:
        if isinstance(A, np.ndarray):
            assert A.shape == B.shape, "Shapes are analytic: {0} and numeric: {1}".format(
                A.shape, B.shape)
            assert A.dtype == B.dtype, "Types are analytic: {0} and numeric: {1}".format(
                A.dtype, B.dtype)
        assert np.allclose(A, B, rtol=rtol, atol=atol), \
            "Diffs are:\n{0}.\nanalytic is:\n{A}.\nnumeric is:\n{B}.".format(A - B, A=A, B=B)
def check_grads(fun, *args):
    """Compare autograd gradients of *fun* (w.r.t. every arg) to numeric ones."""
    if not args:
        raise Exception("No args given")
    exact = tuple(grad(fun, argnum)(*args) for argnum in range(len(args)))
    numeric = nd(fun, *args)
    check_equivalent(exact, numeric)
def to_scalar(x):
    """Reduce nested containers of arrays to one real scalar via sum(sin)."""
    if isinstance(x, (list, ListNode, tuple, TupleNode)):
        return sum(to_scalar(item) for item in x)
    return np.sum(np.real(np.sin(x)))
def quick_grad_check(fun, arg0, extra_args=(), kwargs={}, verbose=True,
                     eps=EPS, rtol=RTOL, atol=ATOL, rs=None):
    """Checks the gradient of a function (w.r.t. to its first arg) in a random direction"""
    if verbose:
        print("Checking gradient of {0} at {1}".format(fun, arg0))

    if rs is None:
        rs = np.random.RandomState()

    # Unit-norm random direction matching arg0's shape.
    random_dir = rs.standard_normal(np.shape(arg0))
    random_dir = random_dir / np.sqrt(np.sum(random_dir * random_dir))
    # Restrict fun to a 1-D slice through arg0 along random_dir.
    unary_fun = lambda x : fun(arg0 + x * random_dir, *extra_args, **kwargs)
    numeric_grad = unary_nd(unary_fun, 0.0, eps=eps)
    # Directional derivative = full gradient projected onto random_dir.
    analytic_grad = np.sum(grad(fun)(arg0, *extra_args, **kwargs) * random_dir)
    assert np.allclose(numeric_grad, analytic_grad, rtol=rtol, atol=atol), \
        "Check failed! nd={0}, ad={1}".format(numeric_grad, analytic_grad)
    if verbose:
        print("Gradient projection OK (numeric grad: {0}, analytic grad: {1})".format(
            numeric_grad, analytic_grad))
# Map NumPy scalar types onto the plain Python types that are considered
# interchangeable when comparing gradient structures (see base_class).
equivalence_class = {}
for float_type in [np.float64, np.float32, np.float16]:
    equivalence_class[float_type] = float
for complex_type in [np.complex64, np.complex128]:
    equivalence_class[complex_type] = complex
def base_class(t):
    """Collapse equivalent numeric types (e.g. np.float32 -> float)."""
    return equivalence_class.get(t, t)
| mit |
lmallin/coverage_test | python_venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/exceptions.py | 515 | 5599 | from __future__ import absolute_import
# Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
# Leaf Exceptions
class MaxRetryError(RequestError):
    """Raised when the maximum number of retries is exceeded.

    :param pool: The connection pool
    :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
    :param string url: The requested Url
    :param exceptions.Exception reason: The underlying error

    """

    def __init__(self, pool, url, reason=None):
        # Keep the underlying error so callers can inspect the cause.
        self.reason = reason

        message = "Max retries exceeded with url: %s (Caused by %r)" % (
            url, reason)

        RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class NewConnectionError(ConnectTimeoutError, PoolError):
"Raised when we fail to establish a new connection. Usually ECONNREFUSED."
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
"Warned when perfoming security reducing actions"
pass
class SubjectAltNameWarning(SecurityWarning):
"Warned when connecting to a host with a certificate missing a SAN."
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class SNIMissingWarning(HTTPWarning):
"Warned when making a HTTPS request without SNI available."
pass
class DependencyWarning(HTTPWarning):
"""
Warned when an attempt is made to import a module with missing optional
dependencies.
"""
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
class ProxySchemeUnknown(AssertionError, ValueError):
"ProxyManager does not support the supplied scheme"
# TODO(t-8ch): Stop inheriting from AssertionError in v2.0.
def __init__(self, scheme):
message = "Not supported proxy scheme %s" % scheme
super(ProxySchemeUnknown, self).__init__(message)
class HeaderParsingError(HTTPError):
    "Raised by assert_header_parsing, but we convert it to a log.warning statement."
    def __init__(self, defects, unparsed_data):
        # Fall back to 'Unknown' when the parser reported no defects.
        message = '%s, unparsed data: %r' % (defects or 'Unknown', unparsed_data)
        super(HeaderParsingError, self).__init__(message)
| mit |
automl/RoBO | robo/maximizers/direct.py | 1 | 2696 | import os
import sys
try:
import DIRECT
except ImportError:
raise ImportError("""
In order to use this module, DIRECT need to be installed. Try running
pip install direct
""")
import numpy as np
from robo.maximizers.base_maximizer import BaseMaximizer
class Direct(BaseMaximizer):

    def __init__(self, objective_function, lower, upper,
                 n_func_evals=400, n_iters=200, verbose=True):
        """
        Interface for the DIRECT algorithm by D. R. Jones, C. D. Perttunen
        and B. E. Stuckmann

        Parameters
        ----------
        objective_function: acquisition function
            The acquisition function which will be maximized
        lower: np.ndarray (D)
            Lower bounds of the input space
        upper: np.ndarray (D)
            Upper bounds of the input space
        n_func_evals: int
            The maximum number of function evaluations
        n_iters: int
            The maximum number of iterations
        verbose: bool
            Suppress Direct's output.
        """
        self.n_func_evals = n_func_evals
        self.n_iters = n_iters
        self.verbose = verbose
        super(Direct, self).__init__(objective_function, lower, upper)

    def _direct_acquisition_fkt_wrapper(self, acq_f):
        # DIRECT minimizes, so negate the acquisition value; the second
        # tuple element is DIRECT's user-abort flag (0 = keep going).
        def _l(x, user_data):
            return -acq_f(np.array([x])), 0
        return _l

    def maximize(self):
        """
        Maximizes the given acquisition function.

        Returns
        -------
        np.ndarray(N,D)
            Point with highest acquisition value.
        """
        if self.verbose:
            x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),
                                   l=[self.lower],
                                   u=[self.upper],
                                   maxT=self.n_iters,
                                   maxf=self.n_func_evals)
        else:
            # Silence DIRECT's native output: duplicate the stdout file
            # descriptor, point fd 1 at /dev/null for the solve call, then
            # restore it.  This works at the OS level because the noise
            # comes from compiled code, not from Python's sys.stdout.
            # NOTE(review): the stray trailing semicolons below are
            # harmless but non-idiomatic.
            fileno = sys.stdout.fileno()
            with os.fdopen(os.dup(fileno), 'wb') as stdout:
                with os.fdopen(os.open(os.devnull, os.O_WRONLY), 'wb') as devnull:
                    sys.stdout.flush();
                    os.dup2(devnull.fileno(), fileno)  # redirect
                    x, _, _ = DIRECT.solve(self._direct_acquisition_fkt_wrapper(self.objective_func),
                                           l=[self.lower],
                                           u=[self.upper],
                                           maxT=self.n_iters,
                                           maxf=self.n_func_evals)
                sys.stdout.flush();
                os.dup2(stdout.fileno(), fileno)  # restore
        return x
| bsd-3-clause |
clienthax/osmc | package/mediacenter-addon-osmc/src/script.module.elementtree/lib/elementtree/ElementPath.py | 25 | 5953 | #
# ElementTree
# $Id: ElementPath.py 3225 2007-08-27 21:32:08Z fredrik $
#
# limited xpath support for element trees
#
# history:
# 2003-05-23 fl created
# 2003-05-28 fl added support for // etc
# 2003-08-27 fl fixed parsing of periods in element names
#
# Copyright (c) 2003-2004 by Fredrik Lundh. All rights reserved.
#
# fredrik@pythonware.com
# http://www.pythonware.com
#
# --------------------------------------------------------------------
# The ElementTree toolkit is
#
# Copyright (c) 1999-2007 by Fredrik Lundh
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Secret Labs AB or the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
##
# Implementation module for XPath support. There's usually no reason
# to import this module directly; the <b>ElementTree</b> does this for
# you, if needed.
##
import re
xpath_tokenizer = re.compile(
"(::|\.\.|\(\)|[/.*:\[\]\(\)@=])|((?:\{[^}]+\})?[^/:\[\]\(\)@=\s]+)|\s+"
).findall
class xpath_descendant_or_self:
    # Marker token representing the "//" (descendant-or-self) operator in a
    # compiled path; Path.findall dispatches on isinstance of this class.
    pass
##
# Wrapper for a compiled XPath.
class Path:

    ##
    # Create an Path instance from an XPath expression.

    def __init__(self, path):
        # Tokenize into (operator, tag) pairs; exactly one of the two fields
        # is non-empty for each token.
        tokens = xpath_tokenizer(path)
        # the current version supports 'path/path'-style expressions only
        self.path = []
        self.tag = None
        if tokens and tokens[0][0] == "/":
            raise SyntaxError("cannot use absolute path on element")
        while tokens:
            op, tag = tokens.pop(0)
            if tag or op == "*":
                self.path.append(tag or op)
            elif op == ".":
                # 'self' step: no-op
                pass
            elif op == "/":
                # A '/' in operator position here means '//' semantics
                # (descendant-or-self); record the marker object.
                self.path.append(xpath_descendant_or_self())
                continue
            else:
                raise SyntaxError("unsupported path syntax (%s)" % op)
            if tokens:
                op, tag = tokens.pop(0)
                if op != "/":
                    raise SyntaxError(
                        "expected path separator (%s)" % (op or tag)
                        )
        if self.path and isinstance(self.path[-1], xpath_descendant_or_self):
            raise SyntaxError("path cannot end with //")
        # Fast path: a single plain tag enables the cheap loops in
        # find/findtext below instead of the generic findall machinery.
        if len(self.path) == 1 and isinstance(self.path[0], type("")):
            self.tag = self.path[0]

    ##
    # Find first matching object.

    def find(self, element):
        tag = self.tag
        if tag is None:
            nodeset = self.findall(element)
            if not nodeset:
                return None
            return nodeset[0]
        for elem in element:
            if elem.tag == tag:
                return elem
        return None

    ##
    # Find text for first matching object.

    def findtext(self, element, default=None):
        tag = self.tag
        if tag is None:
            nodeset = self.findall(element)
            if not nodeset:
                return default
            return nodeset[0].text or ""
        for elem in element:
            if elem.tag == tag:
                return elem.text or ""
        return default

    ##
    # Find all matching objects.

    def findall(self, element):
        # Evaluate the path step by step: 'nodeset' holds the elements that
        # matched the steps consumed so far.
        nodeset = [element]
        index = 0
        while 1:
            try:
                path = self.path[index]
                index = index + 1
            except IndexError:
                return nodeset
            set = []
            if isinstance(path, xpath_descendant_or_self):
                # '//' step: expand to all descendants, optionally filtered
                # by the tag that immediately follows the marker.
                try:
                    tag = self.path[index]
                    if not isinstance(tag, type("")):
                        tag = None
                    else:
                        index = index + 1
                except IndexError:
                    tag = None # invalid path
                for node in nodeset:
                    new = list(node.getiterator(tag))
                    # getiterator includes the node itself; drop it so '//'
                    # only yields proper descendants.
                    if new and new[0] is node:
                        set.extend(new[1:])
                    else:
                        set.extend(new)
            else:
                # Plain tag (or '*') step: match direct children only.
                for node in nodeset:
                    for node in node:
                        if path == "*" or node.tag == path:
                            set.append(node)
            if not set:
                return []
            nodeset = set
_cache = {}

##
# (Internal) Compile path.

def _compile(path):
    # Return a memoized compiled Path if one exists for this expression.
    compiled = _cache.get(path)
    if compiled is not None:
        return compiled
    compiled = Path(path)
    # Keep the memoization table bounded; wholesale clearing is crude but
    # keeps the code simple.
    if len(_cache) >= 100:
        _cache.clear()
    _cache[path] = compiled
    return compiled
##
# Find first matching object.
def find(element, path):
    # Module-level convenience wrapper: compile (with caching) then search.
    return _compile(path).find(element)
##
# Find text for first matching object.
def findtext(element, path, default=None):
    # Module-level convenience wrapper around Path.findtext.
    return _compile(path).findtext(element, default)
##
# Find all matching objects.
def findall(element, path):
    # Module-level convenience wrapper around Path.findall.
    return _compile(path).findall(element)
| gpl-2.0 |
grlee77/numpy | numpy/core/tests/test_api.py | 2 | 21171 | import sys
import numpy as np
from numpy.core._rational_tests import rational
import pytest
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
HAS_REFCOUNT
)
# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
NPY_RELAXED_STRIDES_CHECKING = np.ones((10, 1), order='C').flags.f_contiguous
def test_array_array():
    """np.array() construction from many input kinds: ndarrays, None,
    scalars, bytes/str, memoryview, objects exposing __array_interface__ /
    __array_struct__ / __array__, deeply nested sequences, and lists/tuples
    containing None entries."""
    tobj = type(object)
    ones11 = np.ones((1, 1), np.float64)
    tndarray = type(ones11)
    # Test is_ndarray
    assert_equal(np.array(ones11, dtype=np.float64), ones11)
    if HAS_REFCOUNT:
        # constructing from an existing ndarray must not leak a reference
        # to the ndarray type object
        old_refcount = sys.getrefcount(tndarray)
        np.array(ones11)
        assert_equal(old_refcount, sys.getrefcount(tndarray))
    # test None
    assert_equal(np.array(None, dtype=np.float64),
                 np.array(np.nan, dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(tobj)
        np.array(None, dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(tobj))
    # test scalar
    assert_equal(np.array(1.0, dtype=np.float64),
                 np.ones((), dtype=np.float64))
    if HAS_REFCOUNT:
        old_refcount = sys.getrefcount(np.float64)
        np.array(np.array(1.0, dtype=np.float64), dtype=np.float64)
        assert_equal(old_refcount, sys.getrefcount(np.float64))
    # test string
    S2 = np.dtype((bytes, 2))
    S3 = np.dtype((bytes, 3))
    S5 = np.dtype((bytes, 5))
    assert_equal(np.array(b"1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array(b"1.0").dtype, S3)
    assert_equal(np.array(b"1.0", dtype=bytes).dtype, S3)
    assert_equal(np.array(b"1.0", dtype=S2), np.array(b"1."))
    assert_equal(np.array(b"1", dtype=S5), np.ones((), dtype=S5))
    # test string
    U2 = np.dtype((str, 2))
    U3 = np.dtype((str, 3))
    U5 = np.dtype((str, 5))
    assert_equal(np.array("1.0", dtype=np.float64),
                 np.ones((), dtype=np.float64))
    assert_equal(np.array("1.0").dtype, U3)
    assert_equal(np.array("1.0", dtype=str).dtype, U3)
    assert_equal(np.array("1.0", dtype=U2), np.array(str("1.")))
    assert_equal(np.array("1", dtype=U5), np.ones((), dtype=U5))
    builtins = getattr(__builtins__, '__dict__', __builtins__)
    assert_(hasattr(builtins, 'get'))
    # test memoryview
    dat = np.array(memoryview(b'1.0'), dtype=np.float64)
    assert_equal(dat, [49.0, 46.0, 48.0])
    assert_(dat.dtype.type is np.float64)
    dat = np.array(memoryview(b'1.0'))
    assert_equal(dat, [49, 46, 48])
    assert_(dat.dtype.type is np.uint8)
    # test array interface
    a = np.array(100.0, dtype=np.float64)
    o = type("o", (object,),
             dict(__array_interface__=a.__array_interface__))
    assert_equal(np.array(o, dtype=np.float64), a)
    # test array_struct interface
    a = np.array([(1, 4.0, 'Hello'), (2, 6.0, 'World')],
                 dtype=[('f0', int), ('f1', float), ('f2', str)])
    o = type("o", (object,),
             dict(__array_struct__=a.__array_struct__))
    ## wasn't what I expected... is np.array(o) supposed to equal a ?
    ## instead we get a array([...], dtype=">V18")
    assert_equal(bytes(np.array(o).data), bytes(a.data))
    # test array
    o = type("o", (object,),
             dict(__array__=lambda *x: np.array(100.0, dtype=np.float64)))()
    assert_equal(np.array(o, dtype=np.float64), np.array(100.0, np.float64))
    # test recursion
    nested = 1.5
    for i in range(np.MAXDIMS):
        nested = [nested]
    # no error
    np.array(nested)
    # Exceeds recursion limit
    assert_raises(ValueError, np.array, [nested], dtype=np.float64)
    # Try with lists...
    assert_equal(np.array([None] * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([[None]] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([[None] * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([1.0] * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([[1.0]] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([[1.0] * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))
    # Try with tuples
    assert_equal(np.array((None,) * 10, dtype=np.float64),
                 np.full((10,), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,)] * 10, dtype=np.float64),
                 np.full((10, 1), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10], dtype=np.float64),
                 np.full((1, 10), np.nan, dtype=np.float64))
    assert_equal(np.array([(None,) * 10] * 10, dtype=np.float64),
                 np.full((10, 10), np.nan, dtype=np.float64))
    assert_equal(np.array((1.0,) * 10, dtype=np.float64),
                 np.ones((10,), dtype=np.float64))
    assert_equal(np.array([(1.0,)] * 10, dtype=np.float64),
                 np.ones((10, 1), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10], dtype=np.float64),
                 np.ones((1, 10), dtype=np.float64))
    assert_equal(np.array([(1.0,) * 10] * 10, dtype=np.float64),
                 np.ones((10, 10), dtype=np.float64))
@pytest.mark.parametrize("array", [True, False])
def test_array_impossible_casts(array):
    # All builtin types can be force-cast, at least theoretically,
    # but user dtypes cannot necessarily.
    rt = rational(1, 2)
    if array:
        rt = np.array(rt)
    with assert_raises(ValueError):
        # rational -> datetime64 has no defined cast
        np.array(rt, dtype="M8")
def test_fastCopyAndTranspose():
    """fastCopyAndTranspose must return an owning copy equal to a.T for
    0-D, 1-D and 2-D inputs."""
    # 0D array
    a = np.array(2)
    b = np.fastCopyAndTranspose(a)
    assert_equal(b, a.T)
    assert_(b.flags.owndata)
    # 1D array
    a = np.array([3, 2, 7, 0])
    b = np.fastCopyAndTranspose(a)
    assert_equal(b, a.T)
    assert_(b.flags.owndata)
    # 2D array
    a = np.arange(6).reshape(2, 3)
    b = np.fastCopyAndTranspose(a)
    assert_equal(b, a.T)
    assert_(b.flags.owndata)
def test_array_astype():
    """ndarray.astype: default semantics, copy=False short-circuit, order=,
    casting=, subok=, and object->fixed-width string/unicode conversions."""
    a = np.arange(6, dtype='f4').reshape(2, 3)
    # Default behavior: allows unsafe casts, keeps memory layout,
    # always copies.
    b = a.astype('i4')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.strides, b.strides)
    b = a.T.astype('i4')
    assert_equal(a.T, b)
    assert_equal(b.dtype, np.dtype('i4'))
    assert_equal(a.T.strides, b.strides)
    b = a.astype('f4')
    assert_equal(a, b)
    assert_(not (a is b))
    # copy=False parameter can sometimes skip a copy
    b = a.astype('f4', copy=False)
    assert_(a is b)
    # order parameter allows overriding of the memory layout,
    # forcing a copy if the layout is wrong
    b = a.astype('f4', order='F', copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(b.flags.f_contiguous)
    b = a.astype('f4', order='C', copy=False)
    assert_equal(a, b)
    assert_(a is b)
    assert_(b.flags.c_contiguous)
    # casting parameter allows catching bad casts
    b = a.astype('c8', casting='safe')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('c8'))
    assert_raises(TypeError, a.astype, 'i4', casting='safe')
    # subok=False passes through a non-subclassed array
    b = a.astype('f4', subok=0, copy=False)
    assert_(a is b)
    class MyNDArray(np.ndarray):
        pass
    a = np.array([[0, 1, 2], [3, 4, 5]], dtype='f4').view(MyNDArray)
    # subok=True passes through a subclass
    b = a.astype('f4', subok=True, copy=False)
    assert_(a is b)
    # subok=True is default, and creates a subtype on a cast
    b = a.astype('i4', copy=False)
    assert_equal(a, b)
    assert_equal(type(b), MyNDArray)
    # subok=False never returns a subclass
    b = a.astype('f4', subok=False, copy=False)
    assert_equal(a, b)
    assert_(not (a is b))
    assert_(type(b) is not MyNDArray)
    # Make sure converting from string object to fixed length string
    # does not truncate.
    a = np.array([b'a'*100], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S100'))
    a = np.array([u'a'*100], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U100'))
    # Same test as above but for strings shorter than 64 characters
    a = np.array([b'a'*10], dtype='O')
    b = a.astype('S')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('S10'))
    a = np.array([u'a'*10], dtype='O')
    b = a.astype('U')
    assert_equal(a, b)
    assert_equal(b.dtype, np.dtype('U10'))
    a = np.array(123456789012345678901234567890, dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='O').astype('U')
    assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
    a = np.array([123456789012345678901234567890], dtype='O').astype('S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array([123456789012345678901234567890], dtype='O').astype('U')
    assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
    a = np.array(123456789012345678901234567890, dtype='S')
    assert_array_equal(a, np.array(b'1234567890' * 3, dtype='S30'))
    a = np.array(123456789012345678901234567890, dtype='U')
    assert_array_equal(a, np.array(u'1234567890' * 3, dtype='U30'))
    # reinterpret a 2-char unicode string's buffer as two uint32 code points
    a = np.array(u'a\u0140', dtype='U')
    b = np.ndarray(buffer=a, dtype='uint32', shape=2)
    assert_(b.size == 2)
    a = np.array([1000], dtype='i4')
    assert_raises(TypeError, a.astype, 'S1', casting='safe')
    a = np.array(1000, dtype='i4')
    assert_raises(TypeError, a.astype, 'U1', casting='safe')
@pytest.mark.parametrize("t",
    np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
)
def test_array_astype_warning(t):
    # test ComplexWarning when casting from complex to float or int
    a = np.array(10, dtype=np.complex_)
    assert_warns(np.ComplexWarning, a.astype, t)
@pytest.mark.parametrize(["dtype", "out_dtype"],
        [(np.bytes_, np.bool_),
         (np.unicode_, np.bool_),
         (np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast(dtype, out_dtype):
    """
    Currently, for `astype` strings are cast to booleans effectively by
    calling `bool(int(string))`. This is not consistent (see gh-9875) and
    will eventually be deprecated.
    """
    arr = np.array(["10", "10\0\0\0", "0\0\0", "0"], dtype=dtype)
    expected = np.array([True, True, False, False], dtype=out_dtype)
    assert_array_equal(arr.astype(out_dtype), expected)
@pytest.mark.parametrize(["dtype", "out_dtype"],
        [(np.bytes_, np.bool_),
         (np.unicode_, np.bool_),
         (np.dtype("S10,S9"), np.dtype("?,?"))])
def test_string_to_boolean_cast_errors(dtype, out_dtype):
    """
    These currently error out, since cast to integers fails, but should not
    error out in the future.
    """
    for invalid in ["False", "True", "", "\0", "non-empty"]:
        arr = np.array([invalid], dtype=dtype)
        with assert_raises(ValueError):
            arr.astype(out_dtype)
@pytest.mark.parametrize("str_type", [str, bytes, np.str_, np.unicode_])
@pytest.mark.parametrize("scalar_type",
        [np.complex64, np.complex128, np.clongdouble])
def test_string_to_complex_cast(str_type, scalar_type):
    # Parsing a complex literal must work via the scalar constructor,
    # object-array astype, and item assignment alike.
    value = scalar_type(b"1+3j")
    assert scalar_type(value) == 1+3j
    assert np.array([value], dtype=object).astype(scalar_type)[()] == 1+3j
    assert np.array(value).astype(scalar_type)[()] == 1+3j
    arr = np.zeros(1, dtype=scalar_type)
    arr[0] = value
    assert arr[0] == 1+3j
@pytest.mark.parametrize("dtype", np.typecodes["AllFloat"])
def test_none_to_nan_cast(dtype):
    # Note that at the time of writing this test, the scalar constructors
    # reject None
    arr = np.zeros(1, dtype=dtype)
    arr[0] = None
    assert np.isnan(arr)[0]
    assert np.isnan(np.array(None, dtype=dtype))[()]
    assert np.isnan(np.array([None], dtype=dtype))[0]
    assert np.isnan(np.array(None).astype(dtype))[()]
def test_copyto_fromscalar():
    """Broadcasting a scalar into an array (and its transpose view) with
    np.copyto, with and without a boolean where-mask."""
    arr = np.arange(6, dtype='f4').reshape(2, 3)
    # Unmasked copies fill every element, through either view.
    np.copyto(arr, 1.5)
    assert_equal(arr, 1.5)
    np.copyto(arr.T, 2.5)
    assert_equal(arr, 2.5)
    # Masked copies touch only the True positions.
    fwd_mask = np.array([[0, 1, 0], [0, 0, 1]], dtype='?')
    np.copyto(arr, 3.5, where=fwd_mask)
    assert_equal(arr, [[2.5, 3.5, 2.5], [2.5, 2.5, 3.5]])
    transposed_mask = np.array([[0, 1], [1, 1], [1, 0]], dtype='?')
    np.copyto(arr.T, 4.5, where=transposed_mask)
    assert_equal(arr, [[2.5, 4.5, 4.5], [4.5, 4.5, 3.5]])
def test_copyto():
    """np.copyto array-to-array semantics: overlap handling, casting rules,
    boolean/int where-masks, and rejection of non-array destinations."""
    a = np.arange(6, dtype='i4').reshape(2, 3)
    # Simple copy
    np.copyto(a, [[3, 1, 5], [6, 2, 1]])
    assert_equal(a, [[3, 1, 5], [6, 2, 1]])
    # Overlapping copy should work
    np.copyto(a[:, :2], a[::-1, 1::-1])
    assert_equal(a, [[2, 6, 5], [1, 3, 1]])
    # Defaults to 'same_kind' casting
    assert_raises(TypeError, np.copyto, a, 1.5)
    # Force a copy with 'unsafe' casting, truncating 1.5 to 1
    np.copyto(a, 1.5, casting='unsafe')
    assert_equal(a, 1)
    # Copying with a mask
    np.copyto(a, 3, where=[True, False, True])
    assert_equal(a, [[3, 1, 3], [3, 1, 3]])
    # Casting rule still applies with a mask
    assert_raises(TypeError, np.copyto, a, 3.5, where=[True, False, True])
    # Lists of integer 0's and 1's is ok too
    np.copyto(a, 4.0, casting='unsafe', where=[[0, 1, 1], [1, 0, 0]])
    assert_equal(a, [[3, 4, 4], [4, 1, 3]])
    # Overlapping copy with mask should work
    np.copyto(a[:, :2], a[::-1, 1::-1], where=[[0, 1], [1, 1]])
    assert_equal(a, [[3, 4, 4], [4, 3, 3]])
    # 'dst' must be an array
    assert_raises(TypeError, np.copyto, [1, 2, 3], [2, 3, 4])
def test_copyto_permut():
    """Exhaustive masked-copyto checks exercising the unrolled mask loops:
    long masks, every mask permutation up to 2**9, strided masks, and masks
    whose underlying bytes are nonzero values other than 1."""
    # test explicit overflow case
    pad = 500
    l = [True] * pad + [True, True, True, True]
    r = np.zeros(len(l)-pad)
    d = np.ones(len(l)-pad)
    mask = np.array(l)[pad:]
    np.copyto(r, d, where=mask[::-1])
    # test all permutation of possible masks, 9 should be sufficient for
    # current 4 byte unrolled code
    power = 9
    d = np.ones(power)
    for i in range(2**power):
        r = np.zeros(power)
        l = [(i & x) != 0 for x in range(power)]
        mask = np.array(l)
        np.copyto(r, d, where=mask)
        assert_array_equal(r == 1, l)
        assert_equal(r.sum(), sum(l))
        r = np.zeros(power)
        np.copyto(r, d, where=mask[::-1])
        assert_array_equal(r == 1, l[::-1])
        assert_equal(r.sum(), sum(l))
        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::2])
        assert_array_equal(r[::2] == 1, l[::2])
        assert_equal(r[::2].sum(), sum(l[::2]))
        r = np.zeros(power)
        np.copyto(r[::2], d[::2], where=mask[::-2])
        assert_array_equal(r[::2] == 1, l[::-2])
        assert_equal(r[::2].sum(), sum(l[::-2]))
        # any nonzero byte in the mask buffer must count as True
        for c in [0xFF, 0x7F, 0x02, 0x10]:
            r = np.zeros(power)
            mask = np.array(l)
            imask = np.array(l).view(np.uint8)
            imask[mask != 0] = c
            np.copyto(r, d, where=mask)
            assert_array_equal(r == 1, l)
            assert_equal(r.sum(), sum(l))
    r = np.zeros(power)
    np.copyto(r, d, where=True)
    assert_equal(r.sum(), r.size)
    r = np.ones(power)
    d = np.zeros(power)
    np.copyto(r, d, where=False)
    assert_equal(r.sum(), r.size)
def test_copy_order():
    """ndarray.copy / np.copy must honor order='C'/'F'/'K' for C-contiguous,
    F-contiguous, and non-contiguous inputs, producing the expected
    contiguity flags (and, where deterministic, strides)."""
    a = np.arange(24).reshape(2, 1, 3, 4)
    b = a.copy(order='F')
    c = np.arange(24).reshape(2, 1, 4, 3).swapaxes(2, 3)

    def check_copy_result(x, y, ccontig, fcontig, strides=False):
        # x: the copy under test, y: the array it was copied from.
        assert_(not (x is y))
        assert_equal(x, y)
        # BUG FIX: these two checks previously read the closed-over variable
        # `res` instead of the parameter `x`.  That was harmless at the
        # current call sites (x is always res) but wrong for any other
        # caller of the helper.
        assert_equal(x.flags.c_contiguous, ccontig)
        assert_equal(x.flags.f_contiguous, fcontig)
        # This check is impossible only because
        # NPY_RELAXED_STRIDES_CHECKING changes the strides actively
        if not NPY_RELAXED_STRIDES_CHECKING:
            if strides:
                assert_equal(x.strides, y.strides)
            else:
                assert_(x.strides != y.strides)

    # Validate the initial state of a, b, and c
    assert_(a.flags.c_contiguous)
    assert_(not a.flags.f_contiguous)
    assert_(not b.flags.c_contiguous)
    assert_(b.flags.f_contiguous)
    assert_(not c.flags.c_contiguous)
    assert_(not c.flags.f_contiguous)

    # Copy with order='C'
    res = a.copy(order='C')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = b.copy(order='C')
    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
    res = c.copy(order='C')
    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)
    res = np.copy(a, order='C')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = np.copy(b, order='C')
    check_copy_result(res, b, ccontig=True, fcontig=False, strides=False)
    res = np.copy(c, order='C')
    check_copy_result(res, c, ccontig=True, fcontig=False, strides=False)

    # Copy with order='F'
    res = a.copy(order='F')
    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
    res = b.copy(order='F')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = c.copy(order='F')
    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)
    res = np.copy(a, order='F')
    check_copy_result(res, a, ccontig=False, fcontig=True, strides=False)
    res = np.copy(b, order='F')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = np.copy(c, order='F')
    check_copy_result(res, c, ccontig=False, fcontig=True, strides=False)

    # Copy with order='K' (keep layout)
    res = a.copy(order='K')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = b.copy(order='K')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = c.copy(order='K')
    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
    res = np.copy(a, order='K')
    check_copy_result(res, a, ccontig=True, fcontig=False, strides=True)
    res = np.copy(b, order='K')
    check_copy_result(res, b, ccontig=False, fcontig=True, strides=True)
    res = np.copy(c, order='K')
    check_copy_result(res, c, ccontig=False, fcontig=False, strides=True)
def test_contiguous_flags():
    """C/F-contiguity flag updates for construction, np.array() conversion,
    slicing, ravel and squeeze, under both strides-checking modes."""
    a = np.ones((4, 4, 1))[::2,:,:]
    if NPY_RELAXED_STRIDES_CHECKING:
        # stride of the length-1 axis is irrelevant under relaxed checking
        a.strides = a.strides[:2] + (-123,)
    b = np.ones((2, 2, 1, 2, 2)).swapaxes(3, 4)
    def check_contig(a, ccontig, fcontig):
        assert_(a.flags.c_contiguous == ccontig)
        assert_(a.flags.f_contiguous == fcontig)
    # Check if new arrays are correct:
    check_contig(a, False, False)
    check_contig(b, False, False)
    if NPY_RELAXED_STRIDES_CHECKING:
        check_contig(np.empty((2, 2, 0, 2, 2)), True, True)
        check_contig(np.array([[[1], [2]]], order='F'), True, True)
    else:
        check_contig(np.empty((2, 2, 0, 2, 2)), True, False)
        check_contig(np.array([[[1], [2]]], order='F'), False, True)
    check_contig(np.empty((2, 2)), True, False)
    check_contig(np.empty((2, 2), order='F'), False, True)
    # Check that np.array creates correct contiguous flags:
    check_contig(np.array(a, copy=False), False, False)
    check_contig(np.array(a, copy=False, order='C'), True, False)
    check_contig(np.array(a, ndmin=4, copy=False, order='F'), False, True)
    if NPY_RELAXED_STRIDES_CHECKING:
        # Check slicing update of flags and :
        check_contig(a[0], True, True)
        check_contig(a[None, ::4, ..., None], True, True)
        check_contig(b[0, 0, ...], False, True)
        check_contig(b[:,:, 0:0,:,:], True, True)
    else:
        # Check slicing update of flags:
        check_contig(a[0], True, False)
        # Would be nice if this was C-Contiguous:
        check_contig(a[None, 0, ..., None], False, False)
        check_contig(b[0, 0, 0, ...], False, True)
    # Test ravel and squeeze.
    check_contig(a.ravel(), True, True)
    check_contig(np.ones((1, 3, 1)).squeeze(), True, True)
def test_broadcast_arrays():
    """np.broadcast_arrays must also broadcast structured (user-defined)
    dtypes, replicating the single-row operand."""
    one_row = np.array([(1, 2, 3)], dtype='u4,u4,u4')
    three_rows = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4')
    broadcast = np.broadcast_arrays(one_row, three_rows)
    # The single row is repeated to match the longer operand.
    assert_equal(broadcast[0],
                 np.array([(1, 2, 3)] * 3, dtype='u4,u4,u4'))
    assert_equal(broadcast[1],
                 np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype='u4,u4,u4'))
@pytest.mark.parametrize(["shape", "fill_value", "expected_output"],
        [((2, 2), [5.0, 6.0], np.array([[5.0, 6.0], [5.0, 6.0]])),
         ((3, 2), [1.0, 2.0], np.array([[1.0, 2.0], [1.0, 2.0], [1.0, 2.0]]))])
def test_full_from_list(shape, fill_value, expected_output):
    # np.full must broadcast a list fill_value across the requested shape
    output = np.full(shape, fill_value)
    assert_equal(output, expected_output)
| bsd-3-clause |
pyfa-org/Pyfa | graphs/data/base/cache.py | 2 | 1061 | # =============================================================================
# Copyright (C) 2010 Diego Duclos
#
# This file is part of pyfa.
#
# pyfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyfa. If not, see <http://www.gnu.org/licenses/>.
# =============================================================================
class FitDataCache:
    """Simple per-fit cache: a dict keyed by fit ID with bulk and per-fit
    invalidation."""

    def __init__(self):
        self._data = {}

    def clearForFit(self, fitID):
        # Drop the entry if present; silently do nothing otherwise.
        self._data.pop(fitID, None)

    def clearAll(self):
        self._data.clear()
| gpl-3.0 |
NixaSoftware/CVis | venv/lib/python2.7/site-packages/pandas/tests/indexing/test_iloc.py | 2 | 23461 | """ test positional based indexing with iloc """
import pytest
from warnings import catch_warnings
import numpy as np
import pandas as pd
from pandas.compat import lrange, lmap
from pandas import Series, DataFrame, date_range, concat, isna
from pandas.util import testing as tm
from pandas.tests.indexing.common import Base
class TestiLoc(Base):
    def test_iloc_exceeds_bounds(self):
        """List indexers and single indexers beyond the axis length must
        raise IndexError; out-of-bounds *slices* are silently clipped."""
        # GH6296
        # iloc should allow indexers that exceed the bounds
        df = DataFrame(np.random.random_sample((20, 5)), columns=list('ABCDE'))
        expected = df
        # lists of positions should raise IndexError!
        with tm.assert_raises_regex(IndexError,
                                    'positional indexers '
                                    'are out-of-bounds'):
            df.iloc[:, [0, 1, 2, 3, 4, 5]]
        pytest.raises(IndexError, lambda: df.iloc[[1, 30]])
        pytest.raises(IndexError, lambda: df.iloc[[1, -30]])
        pytest.raises(IndexError, lambda: df.iloc[[100]])
        s = df['A']
        pytest.raises(IndexError, lambda: s.iloc[[100]])
        pytest.raises(IndexError, lambda: s.iloc[[-100]])
        # still raise on a single indexer
        msg = 'single positional indexer is out-of-bounds'
        with tm.assert_raises_regex(IndexError, msg):
            df.iloc[30]
        pytest.raises(IndexError, lambda: df.iloc[-30])
        # GH10779
        # single positive/negative indexer exceeding Series bounds should raise
        # an IndexError
        with tm.assert_raises_regex(IndexError, msg):
            s.iloc[30]
        pytest.raises(IndexError, lambda: s.iloc[-30])
        # slices are ok
        result = df.iloc[:, 4:10]  # 0 < start < len < stop
        expected = df.iloc[:, 4:]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, -4:-10]  # stop < 0 < start < len
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 10:4:-1]  # 0 < stop < len < start (down)
        expected = df.iloc[:, :4:-1]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 4:-10:-1]  # stop < 0 < start < len (down)
        expected = df.iloc[:, 4::-1]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, -10:4]  # start < 0 < stop < len
        expected = df.iloc[:, :4]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 10:4]  # 0 < stop < len < start
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, -10:-11:-1]  # stop < start < 0 < len (down)
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 10:11]  # 0 < len < start < stop
        expected = df.iloc[:, :0]
        tm.assert_frame_equal(result, expected)
        # slice bounds exceeding is ok
        result = s.iloc[18:30]
        expected = s.iloc[18:]
        tm.assert_series_equal(result, expected)
        result = s.iloc[30:]
        expected = s.iloc[:0]
        tm.assert_series_equal(result, expected)
        result = s.iloc[30::-1]
        expected = s.iloc[::-1]
        tm.assert_series_equal(result, expected)
        # doc example
        def check(result, expected):
            # also exercise repr/dtypes to catch lazy-evaluation errors
            str(result)
            result.dtypes
            tm.assert_frame_equal(result, expected)
        dfl = DataFrame(np.random.randn(5, 2), columns=list('AB'))
        check(dfl.iloc[:, 2:3], DataFrame(index=dfl.index))
        check(dfl.iloc[:, 1:3], dfl.iloc[:, [1]])
        check(dfl.iloc[4:6], dfl.iloc[[4]])
        pytest.raises(IndexError, lambda: dfl.iloc[[4, 5, 6]])
        pytest.raises(IndexError, lambda: dfl.iloc[:, 4])
    def test_iloc_getitem_int(self):
        """A scalar integer works on integer-indexed objects and raises
        IndexError-style failures elsewhere (via Base.check_result)."""
        # integer
        self.check_result('integer', 'iloc', 2, 'ix',
                          {0: 4, 1: 6, 2: 8}, typs=['ints', 'uints'])
        self.check_result('integer', 'iloc', 2, 'indexer', 2,
                          typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
                          fails=IndexError)
    def test_iloc_getitem_neg_int(self):
        """Negative scalar integers index from the end."""
        # neg integer
        self.check_result('neg int', 'iloc', -1, 'ix',
                          {0: 6, 1: 9, 2: 12}, typs=['ints', 'uints'])
        self.check_result('neg int', 'iloc', -1, 'indexer', -1,
                          typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
                          fails=IndexError)
    def test_iloc_getitem_list_int(self):
        """Lists and ndarrays of integers select multiple positions; a
        single-element indexer must still return the right container type."""
        # list of ints
        self.check_result('list int', 'iloc', [0, 1, 2], 'ix',
                          {0: [0, 2, 4], 1: [0, 3, 6], 2: [0, 4, 8]},
                          typs=['ints', 'uints'])
        self.check_result('list int', 'iloc', [2], 'ix',
                          {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
        self.check_result('list int', 'iloc', [0, 1, 2], 'indexer', [0, 1, 2],
                          typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
                          fails=IndexError)
        # array of ints (GH5006), make sure that a single indexer is returning
        # the correct type
        self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'ix',
                          {0: [0, 2, 4],
                           1: [0, 3, 6],
                           2: [0, 4, 8]}, typs=['ints', 'uints'])
        self.check_result('array int', 'iloc', np.array([2]), 'ix',
                          {0: [4], 1: [6], 2: [8]}, typs=['ints', 'uints'])
        self.check_result('array int', 'iloc', np.array([0, 1, 2]), 'indexer',
                          [0, 1, 2],
                          typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
                          fails=IndexError)
    def test_iloc_getitem_neg_int_can_reach_first_index(self):
        """-len must reach position 0 for both scalar and list indexers."""
        # GH10547 and GH10779
        # negative integers should be able to reach index 0
        df = DataFrame({'A': [2, 3, 5], 'B': [7, 11, 13]})
        s = df['A']
        expected = df.iloc[0]
        result = df.iloc[-3]
        tm.assert_series_equal(result, expected)
        expected = df.iloc[[0]]
        result = df.iloc[[-3]]
        tm.assert_frame_equal(result, expected)
        expected = s.iloc[0]
        result = s.iloc[-3]
        assert result == expected
        expected = s.iloc[[0]]
        result = s.iloc[[-3]]
        tm.assert_series_equal(result, expected)
        # check the length 1 Series case highlighted in GH10547
        expected = pd.Series(['a'], index=['A'])
        result = expected.iloc[[-1]]
        tm.assert_series_equal(result, expected)
    def test_iloc_getitem_dups(self):
        """Duplicate positions in the indexer and duplicate column labels
        (frames concatenated side by side) must both be handled."""
        # no dups in panel (bug?)
        self.check_result('list int (dups)', 'iloc', [0, 1, 1, 3], 'ix',
                          {0: [0, 2, 2, 6], 1: [0, 3, 3, 9]},
                          objs=['series', 'frame'], typs=['ints', 'uints'])
        # GH 6766
        df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
        df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
        df = concat([df1, df2], axis=1)
        # cross-sectional indexing
        result = df.iloc[0, 0]
        assert isna(result)
        result = df.iloc[0, :]
        expected = Series([np.nan, 1, 3, 3], index=['A', 'B', 'A', 'B'],
                          name=0)
        tm.assert_series_equal(result, expected)
def test_iloc_getitem_array(self):
# array like
s = Series(index=lrange(1, 4))
self.check_result('array like', 'iloc', s.index, 'ix',
{0: [2, 4, 6], 1: [3, 6, 9], 2: [4, 8, 12]},
typs=['ints', 'uints'])
    def test_iloc_getitem_bool(self):
        """Boolean-list indexers through iloc across index dtypes."""
        # boolean indexers
        b = [True, False, True, False, ]
        self.check_result('bool', 'iloc', b, 'ix', b, typs=['ints', 'uints'])
        # non-integer axes are expected to fail with IndexError
        self.check_result('bool', 'iloc', b, 'ix', b,
                          typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
                          fails=IndexError)
    def test_iloc_getitem_slice(self):
        """Positional slice through iloc across index dtypes."""
        # slices
        self.check_result('slice', 'iloc', slice(1, 3), 'ix',
                          {0: [2, 4], 1: [3, 6], 2: [4, 8]},
                          typs=['ints', 'uints'])
        self.check_result('slice', 'iloc', slice(1, 3), 'indexer',
                          slice(1, 3),
                          typs=['labels', 'mixed', 'ts', 'floats', 'empty'],
                          fails=IndexError)
    def test_iloc_getitem_slice_dups(self):
        """Positional slicing must split concatenated frames back apart,
        even with duplicate column labels and a duplicated index."""
        df1 = DataFrame(np.random.randn(10, 4), columns=['A', 'A', 'B', 'B'])
        df2 = DataFrame(np.random.randint(0, 10, size=20).reshape(10, 2),
                        columns=['A', 'C'])
        # axis=1
        df = concat([df1, df2], axis=1)
        tm.assert_frame_equal(df.iloc[:, :4], df1)
        tm.assert_frame_equal(df.iloc[:, 4:], df2)
        df = concat([df2, df1], axis=1)
        tm.assert_frame_equal(df.iloc[:, :2], df2)
        tm.assert_frame_equal(df.iloc[:, 2:], df1)
        # a slice that cuts through the duplicate 'A' columns of df1
        exp = concat([df2, df1.iloc[:, [0]]], axis=1)
        tm.assert_frame_equal(df.iloc[:, 0:3], exp)
        # axis=0
        df = concat([df, df], axis=0)
        tm.assert_frame_equal(df.iloc[0:10, :2], df2)
        tm.assert_frame_equal(df.iloc[0:10, 2:], df1)
        tm.assert_frame_equal(df.iloc[10:, :2], df2)
        tm.assert_frame_equal(df.iloc[10:, 2:], df1)
    def test_iloc_setitem(self):
        """Scalar, slice and in-place augmented assignment through iloc."""
        df = self.frame_ints
        df.iloc[1, 1] = 1
        result = df.iloc[1, 1]
        assert result == 1
        df.iloc[:, 2:3] = 0
        expected = df.iloc[:, 2:3]
        result = df.iloc[:, 2:3]
        tm.assert_frame_equal(result, expected)
        # GH5771
        # += through a positional slice must mutate only that slice
        s = Series(0, index=[4, 5, 6])
        s.iloc[1:2] += 1
        expected = Series([0, 1, 0], index=[4, 5, 6])
        tm.assert_series_equal(s, expected)
    @pytest.mark.parametrize(
        'data, indexes, values, expected_k', [
            # test without indexer value in first level of MultiIndex
            ([[2, 22, 5], [2, 33, 6]], [0, -1, 1], [2, 3, 1], [7, 10]),
            # test like code sample 1 in the issue
            ([[1, 22, 555], [1, 33, 666]], [0, -1, 1], [200, 300, 100],
             [755, 1066]),
            # test like code sample 2 in the issue
            ([[1, 3, 7], [2, 4, 8]], [0, -1, 1], [10, 10, 1000], [17, 1018]),
            # test like code sample 3 in the issue
            ([[1, 11, 4], [2, 22, 5], [3, 33, 6]], [0, -1, 1], [4, 7, 10],
             [8, 15, 13])
        ])
    def test_iloc_setitem_int_multiindex_series(
            self, data, indexes, values, expected_k):
        """iloc augmented assignment on a MultiIndex-ed Series (GH17148):
        integer positions (incl. negative) must be positional, not labels."""
        # GH17148
        df = pd.DataFrame(
            data=data,
            columns=['i', 'j', 'k'])
        df = df.set_index(['i', 'j'])
        series = df.k.copy()
        for i, v in zip(indexes, values):
            series.iloc[i] += v
        df['k'] = expected_k
        expected = df.k
        tm.assert_series_equal(series, expected)
    def test_iloc_setitem_list(self):
        """Augmented assignment through a list-of-positions iloc indexer."""
        # setitem with an iloc list
        df = DataFrame(np.arange(9).reshape((3, 3)), index=["A", "B", "C"],
                       columns=["A", "B", "C"])
        df.iloc[[0, 1], [1, 2]]
        df.iloc[[0, 1], [1, 2]] += 100
        expected = DataFrame(
            np.array([0, 101, 102, 3, 104, 105, 6, 7, 8]).reshape((3, 3)),
            index=["A", "B", "C"], columns=["A", "B", "C"])
        tm.assert_frame_equal(df, expected)
    def test_iloc_setitem_pandas_object(self):
        """Series/Index objects as iloc indexers for setitem (GH 17193)."""
        # GH 17193, affecting old numpy (1.7 and 1.8)
        s_orig = Series([0, 1, 2, 3])
        expected = Series([0, -1, -2, 3])
        s = s_orig.copy()
        s.iloc[Series([1, 2])] = [-1, -2]
        tm.assert_series_equal(s, expected)
        s = s_orig.copy()
        s.iloc[pd.Index([1, 2])] = [-1, -2]
        tm.assert_series_equal(s, expected)
    def test_iloc_setitem_dups(self):
        """iloc setitem on a frame with duplicate column labels (GH 6766)."""
        # GH 6766
        # iloc with a mask aligning from another iloc
        df1 = DataFrame([{'A': None, 'B': 1}, {'A': 2, 'B': 2}])
        df2 = DataFrame([{'A': 3, 'B': 3}, {'A': 4, 'B': 4}])
        df = concat([df1, df2], axis=1)
        expected = df.fillna(3)
        expected['A'] = expected['A'].astype('float64')
        # positions where the first 'A' column is NaN
        inds = np.isnan(df.iloc[:, 0])
        mask = inds[inds].index
        df.iloc[mask, 0] = df.iloc[mask, 2]
        tm.assert_frame_equal(df, expected)
        # del a dup column across blocks
        expected = DataFrame({0: [1, 2], 1: [3, 4]})
        expected.columns = ['B', 'B']
        del df['A']
        tm.assert_frame_equal(df, expected)
        # assign back to self
        df.iloc[[0, 1], [0, 1]] = df.iloc[[0, 1], [0, 1]]
        tm.assert_frame_equal(df, expected)
        # reversed x 2: two row-reversing assignments round-trip to identity
        df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
            drop=True)
        df.iloc[[1, 0], [0, 1]] = df.iloc[[1, 0], [0, 1]].reset_index(
            drop=True)
        tm.assert_frame_equal(df, expected)
    def test_iloc_getitem_frame(self):
        """Positional iloc must agree with label-based .ix on a frame whose
        integer labels (0, 2, 4, ...) differ from the positions."""
        df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2),
                       columns=lrange(0, 8, 2))
        result = df.iloc[2]
        with catch_warnings(record=True):
            # .ix is deprecated; warnings are suppressed throughout
            exp = df.ix[4]
        tm.assert_series_equal(result, exp)
        result = df.iloc[2, 2]
        with catch_warnings(record=True):
            exp = df.ix[4, 4]
        assert result == exp
        # slice
        result = df.iloc[4:8]
        with catch_warnings(record=True):
            expected = df.ix[8:14]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[:, 2:3]
        with catch_warnings(record=True):
            expected = df.ix[:, 4:5]
        tm.assert_frame_equal(result, expected)
        # list of integers
        result = df.iloc[[0, 1, 3]]
        with catch_warnings(record=True):
            expected = df.ix[[0, 2, 6]]
        tm.assert_frame_equal(result, expected)
        result = df.iloc[[0, 1, 3], [0, 1]]
        with catch_warnings(record=True):
            expected = df.ix[[0, 2, 6], [0, 2]]
        tm.assert_frame_equal(result, expected)
        # neg indices
        result = df.iloc[[-1, 1, 3], [-1, 1]]
        with catch_warnings(record=True):
            expected = df.ix[[18, 2, 6], [6, 2]]
        tm.assert_frame_equal(result, expected)
        # dups indices
        result = df.iloc[[-1, -1, 1, 3], [-1, 1]]
        with catch_warnings(record=True):
            expected = df.ix[[18, 18, 2, 6], [6, 2]]
        tm.assert_frame_equal(result, expected)
        # with index-like
        s = Series(index=lrange(1, 5))
        result = df.iloc[s.index]
        with catch_warnings(record=True):
            expected = df.ix[[2, 4, 6, 8]]
        tm.assert_frame_equal(result, expected)
    def test_iloc_getitem_labelled_frame(self):
        """iloc on a string-labelled frame: positions map to labels, labels
        themselves are rejected, and out-of-bounds raises IndexError."""
        # try with labelled frame
        df = DataFrame(np.random.randn(10, 4),
                       index=list('abcdefghij'), columns=list('ABCD'))
        result = df.iloc[1, 1]
        exp = df.loc['b', 'B']
        assert result == exp
        result = df.iloc[:, 2:3]
        expected = df.loc[:, ['C']]
        tm.assert_frame_equal(result, expected)
        # negative indexing
        result = df.iloc[-1, -1]
        exp = df.loc['j', 'D']
        assert result == exp
        # out-of-bounds exception
        pytest.raises(IndexError, df.iloc.__getitem__, tuple([10, 5]))
        # trying to use a label
        pytest.raises(ValueError, df.iloc.__getitem__, tuple(['j', 'D']))
    def test_iloc_getitem_doc_issue(self):
        """Multi-axis iloc slicing on a single-block frame (GH 6059),
        including duplicate column labels and non-default integer axes."""
        # multi axis slicing issue with single block
        # surfaced in GH 6059
        arr = np.random.randn(6, 4)
        index = date_range('20130101', periods=6)
        columns = list('ABCD')
        df = DataFrame(arr, index=index, columns=columns)
        # defines ref_locs
        df.describe()
        result = df.iloc[3:5, 0:2]
        str(result)
        result.dtypes
        expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
                             columns=columns[0:2])
        tm.assert_frame_equal(result, expected)
        # for dups
        df.columns = list('aaaa')
        result = df.iloc[3:5, 0:2]
        str(result)
        result.dtypes
        expected = DataFrame(arr[3:5, 0:2], index=index[3:5],
                             columns=list('aa'))
        tm.assert_frame_equal(result, expected)
        # related
        arr = np.random.randn(6, 4)
        index = list(range(0, 12, 2))
        columns = list(range(0, 8, 2))
        df = DataFrame(arr, index=index, columns=columns)
        # touch internal block manager locations before slicing
        df._data.blocks[0].mgr_locs
        result = df.iloc[1:5, 2:4]
        str(result)
        result.dtypes
        expected = DataFrame(arr[1:5, 2:4], index=index[1:5],
                             columns=columns[2:4])
        tm.assert_frame_equal(result, expected)
    def test_iloc_setitem_series(self):
        """iloc setitem on labelled frames and series, incl. stepped slices."""
        df = DataFrame(np.random.randn(10, 4), index=list('abcdefghij'),
                       columns=list('ABCD'))
        df.iloc[1, 1] = 1
        result = df.iloc[1, 1]
        assert result == 1
        df.iloc[:, 2:3] = 0
        expected = df.iloc[:, 2:3]
        result = df.iloc[:, 2:3]
        tm.assert_frame_equal(result, expected)
        s = Series(np.random.randn(10), index=lrange(0, 20, 2))
        s.iloc[1] = 1
        result = s.iloc[1]
        assert result == 1
        s.iloc[:4] = 0
        expected = s.iloc[:4]
        result = s.iloc[:4]
        tm.assert_series_equal(result, expected)
        # stepped slices interleave: evens then odds fill 0..5
        s = Series([-1] * 6)
        s.iloc[0::2] = [0, 2, 4]
        s.iloc[1::2] = [1, 3, 5]
        result = s
        expected = Series([0, 1, 2, 3, 4, 5])
        tm.assert_series_equal(result, expected)
    def test_iloc_setitem_list_of_lists(self):
        """Setting a row slice from a list-of-lists (GH 7551): must behave
        the same for single-dtype and mixed-dtype frames."""
        # GH 7551
        # list-of-list is set incorrectly in mixed vs. single dtyped frames
        df = DataFrame(dict(A=np.arange(5, dtype='int64'),
                            B=np.arange(5, 10, dtype='int64')))
        df.iloc[2:4] = [[10, 11], [12, 13]]
        expected = DataFrame(dict(A=[0, 1, 10, 12, 4], B=[5, 6, 11, 13, 9]))
        tm.assert_frame_equal(df, expected)
        # mixed dtype (object + int64)
        df = DataFrame(
            dict(A=list('abcde'), B=np.arange(5, 10, dtype='int64')))
        df.iloc[2:4] = [['x', 11], ['y', 13]]
        expected = DataFrame(dict(A=['a', 'b', 'x', 'y', 'e'],
                                  B=[5, 6, 11, 13, 9]))
        tm.assert_frame_equal(df, expected)
def test_iloc_mask(self):
# GH 3631, iloc with a mask (of a series) should raise
df = DataFrame(lrange(5), list('ABCDE'), columns=['a'])
mask = (df.a % 2 == 0)
pytest.raises(ValueError, df.iloc.__getitem__, tuple([mask]))
mask.index = lrange(len(mask))
pytest.raises(NotImplementedError, df.iloc.__getitem__,
tuple([mask]))
# ndarray ok
result = df.iloc[np.array([True] * len(mask), dtype=bool)]
tm.assert_frame_equal(result, df)
# the possibilities
locs = np.arange(4)
nums = 2 ** locs
reps = lmap(bin, nums)
df = DataFrame({'locs': locs, 'nums': nums}, reps)
expected = {
(None, ''): '0b1100',
(None, '.loc'): '0b1100',
(None, '.iloc'): '0b1100',
('index', ''): '0b11',
('index', '.loc'): '0b11',
('index', '.iloc'): ('iLocation based boolean indexing '
'cannot use an indexable as a mask'),
('locs', ''): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the indexed '
'object do not match',
('locs', '.loc'): 'Unalignable boolean Series provided as indexer '
'(index of the boolean Series and of the '
'indexed object do not match',
('locs', '.iloc'): ('iLocation based boolean indexing on an '
'integer type is not available'),
}
# UserWarnings from reindex of a boolean mask
with catch_warnings(record=True):
result = dict()
for idx in [None, 'index', 'locs']:
mask = (df.nums > 2).values
if idx:
mask = Series(mask, list(reversed(getattr(df, idx))))
for method in ['', '.loc', '.iloc']:
try:
if method:
accessor = getattr(df, method[1:])
else:
accessor = df
ans = str(bin(accessor[mask]['nums'].sum()))
except Exception as e:
ans = str(e)
key = tuple([idx, method])
r = expected.get(key)
if r != ans:
raise AssertionError(
"[%s] does not match [%s], received [%s]"
% (key, ans, r))
    def test_iloc_non_unique_indexing(self):
        """iloc with a non-unique axis (GH 4017): positional indexing stays
        positional, and .loc on the same labels warns and aligns."""
        # GH 4017, non-unique indexing (on the axis)
        df = DataFrame({'A': [0.1] * 3000, 'B': [1] * 3000})
        idx = np.array(lrange(30)) * 99
        expected = df.iloc[idx]
        df3 = pd.concat([df, 2 * df, 3 * df])
        result = df3.iloc[idx]
        tm.assert_frame_equal(result, expected)
        df2 = DataFrame({'A': [0.1] * 1000, 'B': [1] * 1000})
        df2 = pd.concat([df2, 2 * df2, 3 * df2])
        sidx = df2.index.to_series()
        expected = df2.iloc[idx[idx <= sidx.max()]]
        # each retained row appears once per concatenated copy (x1, x2, x3)
        new_list = []
        for r, s in expected.iterrows():
            new_list.append(s)
            new_list.append(s * 2)
            new_list.append(s * 3)
        expected = DataFrame(new_list)
        expected = pd.concat([expected, DataFrame(index=idx[idx > sidx.max()])
                              ])
        # .loc with missing labels is deprecated -> FutureWarning expected
        with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
            result = df2.loc[idx]
        tm.assert_frame_equal(result, expected, check_index_type=False)
    def test_iloc_empty_list_indexer_is_ok(self):
        """An empty-list iloc indexer must behave like an empty slice."""
        from pandas.util.testing import makeCustomDataframe as mkdf
        df = mkdf(5, 2)
        # vertical empty
        tm.assert_frame_equal(df.iloc[:, []], df.iloc[:, :0],
                              check_index_type=True, check_column_type=True)
        # horizontal empty
        tm.assert_frame_equal(df.iloc[[], :], df.iloc[:0, :],
                              check_index_type=True, check_column_type=True)
        # horizontal empty
        tm.assert_frame_equal(df.iloc[[]], df.iloc[:0, :],
                              check_index_type=True,
                              check_column_type=True)
    def test_identity_slice_returns_new_object(self):
        """iloc[:] must return a new (shallow-copy) object, not self (GH13873)."""
        # GH13873
        original_df = DataFrame({'a': [1, 2, 3]})
        sliced_df = original_df.iloc[:]
        assert sliced_df is not original_df
        # should be a shallow copy
        original_df['a'] = [4, 4, 4]
        assert (sliced_df['a'] == 4).all()
        original_series = Series([1, 2, 3, 4, 5, 6])
        sliced_series = original_series.iloc[:]
        assert sliced_series is not original_series
        # should also be a shallow copy
        original_series[:3] = [7, 8, 9]
        assert all(sliced_series[:3] == [7, 8, 9])
| apache-2.0 |
natefoo/pip | tests/unit/test_options.py | 22 | 9922 | import os
import pytest
import pip.baseparser
from pip import main
from pip import cmdoptions
from pip.basecommand import Command
from pip.commands import commands_dict as commands
class FakeCommand(Command):
    """Minimal pip subcommand used as a fixture: parses its args (including
    the shared index option group) and returns the (options, args) pair."""
    name = 'fake'
    summary = name
    def main(self, args):
        # attach the standard index options so index-related flags parse
        index_opts = cmdoptions.make_option_group(
            cmdoptions.index_group,
            self.parser,
        )
        self.parser.add_option_group(index_opts)
        return self.parse_args(args)
class TestOptionPrecedence(object):
    """
    Tests for confirming our option precedence:
    cli -> environment -> subcommand config -> global config -> option
    defaults
    """
    def setup(self):
        # snapshot the environment and register the fake subcommand so
        # tests can mutate PIP_* variables freely
        self.environ_before = os.environ.copy()
        commands[FakeCommand.name] = FakeCommand
    def teardown(self):
        # NOTE(review): rebinding os.environ to a plain dict replaces the
        # os._Environ mapping (putenv syncing is lost for later tests);
        # os.environ.clear() + update() would restore in place — confirm
        # before changing, other test classes here share this pattern.
        os.environ = self.environ_before
        commands.pop(FakeCommand.name)
    def get_config_section(self, section):
        """Fake config: 'timeout' set in both global and command sections."""
        config = {
            'global': [('timeout', '-3')],
            'fake': [('timeout', '-2')],
        }
        return config[section]
    def get_config_section_global(self, section):
        """Fake config: 'timeout' set only in the global section."""
        config = {
            'global': [('timeout', '-3')],
            'fake': [],
        }
        return config[section]
    def test_env_override_default_int(self):
        """
        Test that environment variable overrides an int option default.
        """
        os.environ['PIP_TIMEOUT'] = '-1'
        options, args = main(['fake'])
        assert options.timeout == -1
    def test_env_override_default_append(self):
        """
        Test that environment variable overrides an append option default.
        """
        os.environ['PIP_FIND_LINKS'] = 'F1'
        options, args = main(['fake'])
        assert options.find_links == ['F1']
        # space-separated values append in order
        os.environ['PIP_FIND_LINKS'] = 'F1 F2'
        options, args = main(['fake'])
        assert options.find_links == ['F1', 'F2']
    def test_env_override_default_choice(self):
        """
        Test that environment variable overrides a choice option default.
        """
        os.environ['PIP_EXISTS_ACTION'] = 'w'
        options, args = main(['fake'])
        assert options.exists_action == ['w']
        os.environ['PIP_EXISTS_ACTION'] = 's w'
        options, args = main(['fake'])
        assert options.exists_action == ['s', 'w']
    def test_env_alias_override_default(self):
        """
        When an option has multiple long forms, test that the technique of
        using the env variable, "PIP_<long form>" works for all cases.
        (e.g. PIP_LOG_FILE and PIP_LOCAL_LOG should all work)
        """
        os.environ['PIP_LOG_FILE'] = 'override.log'
        options, args = main(['fake'])
        assert options.log == 'override.log'
        os.environ['PIP_LOCAL_LOG'] = 'override.log'
        options, args = main(['fake'])
        assert options.log == 'override.log'
    def test_cli_override_environment(self):
        """
        Test the cli overrides and environment variable
        """
        os.environ['PIP_TIMEOUT'] = '-1'
        options, args = main(['fake', '--timeout', '-2'])
        assert options.timeout == -2
    def test_environment_override_config(self, monkeypatch):
        """
        Test an environment variable overrides the config file
        """
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            "get_config_section",
            self.get_config_section,
        )
        os.environ['PIP_TIMEOUT'] = '-1'
        options, args = main(['fake'])
        assert options.timeout == -1
    def test_commmand_config_override_global_config(self, monkeypatch):
        """
        Test that command config overrides global config
        """
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            "get_config_section",
            self.get_config_section,
        )
        options, args = main(['fake'])
        assert options.timeout == -2
    def test_global_config_is_used(self, monkeypatch):
        """
        Test that global config is used
        """
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            "get_config_section",
            self.get_config_section_global,
        )
        options, args = main(['fake'])
        assert options.timeout == -3
class TestOptionsInterspersed(object):
    """General options may appear before or after the subcommand name;
    subcommand-specific options must come after it."""
    def setup(self):
        self.environ_before = os.environ.copy()
        commands[FakeCommand.name] = FakeCommand
    def teardown(self):
        # NOTE(review): rebinds os.environ to a plain dict (see
        # TestOptionPrecedence.teardown for the same pattern)
        os.environ = self.environ_before
        commands.pop(FakeCommand.name)
    def test_general_option_after_subcommand(self):
        options, args = main(['fake', '--timeout', '-1'])
        assert options.timeout == -1
    def test_option_after_subcommand_arg(self):
        options, args = main(['fake', 'arg', '--timeout', '-1'])
        assert options.timeout == -1
    def test_additive_before_after_subcommand(self):
        # -v before and after the subcommand accumulate
        options, args = main(['-v', 'fake', '-v'])
        assert options.verbose == 2
    def test_subcommand_option_before_subcommand_fails(self):
        with pytest.raises(SystemExit):
            main(['--find-links', 'F1', 'fake'])
class TestGeneralOptions(object):
    # the reason to specifically test general options is due to the
    # extra processing they receive, and the number of bugs we've had
    #
    # Each test parses the same flag before and after the subcommand and
    # asserts both placements yield the same parsed value.
    def setup(self):
        self.environ_before = os.environ.copy()
        commands[FakeCommand.name] = FakeCommand
    def teardown(self):
        # NOTE(review): rebinds os.environ to a plain dict (see
        # TestOptionPrecedence.teardown for the same pattern)
        os.environ = self.environ_before
        commands.pop(FakeCommand.name)
    def test_require_virtualenv(self):
        options1, args1 = main(['--require-virtualenv', 'fake'])
        options2, args2 = main(['fake', '--require-virtualenv'])
        assert options1.require_venv
        assert options2.require_venv
    def test_verbose(self):
        options1, args1 = main(['--verbose', 'fake'])
        options2, args2 = main(['fake', '--verbose'])
        assert options1.verbose == options2.verbose == 1
    def test_quiet(self):
        # --quiet is additive: each repetition increments the count
        options1, args1 = main(['--quiet', 'fake'])
        options2, args2 = main(['fake', '--quiet'])
        assert options1.quiet == options2.quiet == 1
        options3, args3 = main(['--quiet', '--quiet', 'fake'])
        options4, args4 = main(['fake', '--quiet', '--quiet'])
        assert options3.quiet == options4.quiet == 2
        options5, args5 = main(['--quiet', '--quiet', '--quiet', 'fake'])
        options6, args6 = main(['fake', '--quiet', '--quiet', '--quiet'])
        assert options5.quiet == options6.quiet == 3
    def test_log(self):
        options1, args1 = main(['--log', 'path', 'fake'])
        options2, args2 = main(['fake', '--log', 'path'])
        assert options1.log == options2.log == 'path'
    def test_local_log(self):
        # --local-log is an alias feeding the same 'log' destination
        options1, args1 = main(['--local-log', 'path', 'fake'])
        options2, args2 = main(['fake', '--local-log', 'path'])
        assert options1.log == options2.log == 'path'
    def test_no_input(self):
        options1, args1 = main(['--no-input', 'fake'])
        options2, args2 = main(['fake', '--no-input'])
        assert options1.no_input
        assert options2.no_input
    def test_proxy(self):
        options1, args1 = main(['--proxy', 'path', 'fake'])
        options2, args2 = main(['fake', '--proxy', 'path'])
        assert options1.proxy == options2.proxy == 'path'
    def test_retries(self):
        options1, args1 = main(['--retries', '-1', 'fake'])
        options2, args2 = main(['fake', '--retries', '-1'])
        assert options1.retries == options2.retries == -1
    def test_timeout(self):
        options1, args1 = main(['--timeout', '-1', 'fake'])
        options2, args2 = main(['fake', '--timeout', '-1'])
        assert options1.timeout == options2.timeout == -1
    def test_default_vcs(self):
        options1, args1 = main(['--default-vcs', 'path', 'fake'])
        options2, args2 = main(['fake', '--default-vcs', 'path'])
        assert options1.default_vcs == options2.default_vcs == 'path'
    def test_skip_requirements_regex(self):
        options1, args1 = main(['--skip-requirements-regex', 'path', 'fake'])
        options2, args2 = main(['fake', '--skip-requirements-regex', 'path'])
        assert options1.skip_requirements_regex == 'path'
        assert options2.skip_requirements_regex == 'path'
    def test_exists_action(self):
        options1, args1 = main(['--exists-action', 'w', 'fake'])
        options2, args2 = main(['fake', '--exists-action', 'w'])
        assert options1.exists_action == options2.exists_action == ['w']
    def test_cert(self):
        options1, args1 = main(['--cert', 'path', 'fake'])
        options2, args2 = main(['fake', '--cert', 'path'])
        assert options1.cert == options2.cert == 'path'
    def test_client_cert(self):
        options1, args1 = main(['--client-cert', 'path', 'fake'])
        options2, args2 = main(['fake', '--client-cert', 'path'])
        assert options1.client_cert == options2.client_cert == 'path'
class TestOptionsConfigFiles(object):
    """Discovery of config files by ConfigOptionParser."""
    def test_venv_config_file_found(self, monkeypatch):
        # We only want a dummy object to call the get_config_files method
        monkeypatch.setattr(
            pip.baseparser.ConfigOptionParser,
            '__init__',
            lambda self: None,
        )
        # strict limit on the site_config_files list
        monkeypatch.setattr(pip.baseparser, 'site_config_files', ['/a/place'])
        # If we are running in a virtualenv and all files appear to exist,
        # we should see two config files.
        monkeypatch.setattr(
            pip.baseparser,
            'running_under_virtualenv',
            lambda: True,
        )
        monkeypatch.setattr(os.path, 'exists', lambda filename: True)
        cp = pip.baseparser.ConfigOptionParser()
        # site + user + global + venv config files, all "existing"
        assert len(cp.get_config_files()) == 4
| mit |
axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/cloud/bigquery/dataset.py | 7 | 21299 | # Copyright 2015 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define API Datasets."""
import six
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud.exceptions import NotFound
from google.cloud.bigquery.table import Table
from google.cloud.iterator import HTTPIterator
class AccessGrant(object):
    """Represent grant of an access role to an entity.
    Every entry in the access list will have exactly one of
    ``userByEmail``, ``groupByEmail``, ``domain``, ``specialGroup`` or
    ``view`` set. And if anything but ``view`` is set, it'll also have a
    ``role`` specified. ``role`` is omitted for a ``view``, since
    ``view`` s are always read-only.
    See https://cloud.google.com/bigquery/docs/reference/v2/datasets.
    :type role: str
    :param role: Role granted to the entity. One of
                 * ``'OWNER'``
                 * ``'WRITER'``
                 * ``'READER'``
                 May also be ``None`` if the ``entity_type`` is ``view``.
    :type entity_type: str
    :param entity_type: Type of entity being granted the role. One of
                        :attr:`ENTITY_TYPES`.
    :type entity_id: str
    :param entity_id: ID of entity being granted the role.
    :raises: :class:`ValueError` if the ``entity_type`` is not among
             :attr:`ENTITY_TYPES`, or if a ``view`` has ``role`` set or
             a non ``view`` **does not** have a ``role`` set.
    """
    ENTITY_TYPES = frozenset(['userByEmail', 'groupByEmail', 'domain',
                              'specialGroup', 'view'])
    """Allowed entity types."""
    def __init__(self, role, entity_type, entity_id):
        if entity_type not in self.ENTITY_TYPES:
            message = 'Entity type %r not among: %s' % (
                entity_type, ', '.join(self.ENTITY_TYPES))
            raise ValueError(message)
        if entity_type == 'view':
            # views are read-only: a role would be meaningless
            if role is not None:
                raise ValueError('Role must be None for a view. Received '
                                 'role: %r' % (role,))
        else:
            if role is None:
                raise ValueError('Role must be set for entity '
                                 'type %r' % (entity_type,))
        self.role = role
        self.entity_type = entity_type
        self.entity_id = entity_id
    def __eq__(self, other):
        # BUG FIX: return NotImplemented for foreign types so comparison
        # falls back gracefully instead of raising AttributeError.
        if not isinstance(other, AccessGrant):
            return NotImplemented
        return (
            self.role == other.role and
            self.entity_type == other.entity_type and
            self.entity_id == other.entity_id)
    def __ne__(self, other):
        # BUG FIX: Python 2 (targeted via ``six`` in this module) does not
        # derive ``!=`` from ``__eq__``; without this, ``a != b`` compared
        # identity and could disagree with ``__eq__``.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result
    def __repr__(self):
        return '<AccessGrant: role=%s, %s=%s>' % (
            self.role, self.entity_type, self.entity_id)
class Dataset(object):
    """Datasets are containers for tables.
    See:
    https://cloud.google.com/bigquery/docs/reference/v2/datasets
    :type name: str
    :param name: the name of the dataset
    :type client: :class:`google.cloud.bigquery.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the dataset (which requires a project).
    :type access_grants: list of :class:`AccessGrant`
    :param access_grants: roles granted to entities for this dataset
    :type project: str
    :param project: (Optional) project ID for the dataset (defaults to
                    the project of the client).
    """
    # set by the access_grants property setter (stored as a tuple)
    _access_grants = None
    def __init__(self, name, client, access_grants=(), project=None):
        self.name = name
        self._client = client
        # server-side properties, populated by _set_properties()
        self._properties = {}
        # Let the @property do validation.
        self.access_grants = access_grants
        self._project = project or client.project
    @property
    def project(self):
        """Project bound to the dataset.
        :rtype: str
        :returns: the project (derived from the client).
        """
        return self._project
    @property
    def path(self):
        """URL path for the dataset's APIs.
        :rtype: str
        :returns: the path based on project and dataset name.
        """
        return '/projects/%s/datasets/%s' % (self.project, self.name)
    @property
    def access_grants(self):
        """Dataset's access grants.
        :rtype: list of :class:`AccessGrant`
        :returns: roles granted to entities for this dataset
        """
        # return a fresh list so callers cannot mutate internal state
        return list(self._access_grants)
    @access_grants.setter
    def access_grants(self, value):
        """Update dataset's access grants
        :type value: list of :class:`AccessGrant`
        :param value: roles granted to entities for this dataset
        :raises: TypeError if 'value' is not a sequence, or ValueError if
                 any item in the sequence is not an AccessGrant
        """
        if not all(isinstance(field, AccessGrant) for field in value):
            raise ValueError('Values must be AccessGrant instances')
        # store a tuple so the internal state is immutable
        self._access_grants = tuple(value)
    @property
    def created(self):
        """Datetime at which the dataset was created.
        :rtype: ``datetime.datetime``, or ``NoneType``
        :returns: the creation time (None until set from the server).
        """
        creation_time = self._properties.get('creationTime')
        if creation_time is not None:
            # creation_time will be in milliseconds.
            return _datetime_from_microseconds(1000.0 * creation_time)
    @property
    def dataset_id(self):
        """ID for the dataset resource.
        :rtype: str, or ``NoneType``
        :returns: the ID (None until set from the server).
        """
        return self._properties.get('id')
    @property
    def etag(self):
        """ETag for the dataset resource.
        :rtype: str, or ``NoneType``
        :returns: the ETag (None until set from the server).
        """
        return self._properties.get('etag')
    @property
    def modified(self):
        """Datetime at which the dataset was last modified.
        :rtype: ``datetime.datetime``, or ``NoneType``
        :returns: the modification time (None until set from the server).
        """
        modified_time = self._properties.get('lastModifiedTime')
        if modified_time is not None:
            # modified_time will be in milliseconds.
            return _datetime_from_microseconds(1000.0 * modified_time)
    @property
    def self_link(self):
        """URL for the dataset resource.
        :rtype: str, or ``NoneType``
        :returns: the URL (None until set from the server).
        """
        return self._properties.get('selfLink')
    @property
    def default_table_expiration_ms(self):
        """Default expiration time for tables in the dataset.
        :rtype: int, or ``NoneType``
        :returns: The time in milliseconds, or None (the default).
        """
        return self._properties.get('defaultTableExpirationMs')
    @default_table_expiration_ms.setter
    def default_table_expiration_ms(self, value):
        """Update default expiration time for tables in the dataset.
        :type value: int
        :param value: (Optional) new default time, in milliseconds
        :raises: ValueError for invalid value types.
        """
        # six.integer_types covers int and (on Python 2) long
        if not isinstance(value, six.integer_types) and value is not None:
            raise ValueError("Pass an integer, or None")
        self._properties['defaultTableExpirationMs'] = value
    @property
    def description(self):
        """Description of the dataset.
        :rtype: str, or ``NoneType``
        :returns: The description as set by the user, or None (the default).
        """
        return self._properties.get('description')
    @description.setter
    def description(self, value):
        """Update description of the dataset.
        :type value: str
        :param value: (Optional) new description
        :raises: ValueError for invalid value types.
        """
        if not isinstance(value, six.string_types) and value is not None:
            raise ValueError("Pass a string, or None")
        self._properties['description'] = value
    @property
    def friendly_name(self):
        """Title of the dataset.
        :rtype: str, or ``NoneType``
        :returns: The name as set by the user, or None (the default).
        """
        return self._properties.get('friendlyName')
    @friendly_name.setter
    def friendly_name(self, value):
        """Update title of the dataset.
        :type value: str
        :param value: (Optional) new title
        :raises: ValueError for invalid value types.
        """
        if not isinstance(value, six.string_types) and value is not None:
            raise ValueError("Pass a string, or None")
        self._properties['friendlyName'] = value
    @property
    def location(self):
        """Location in which the dataset is hosted.
        :rtype: str, or ``NoneType``
        :returns: The location as set by the user, or None (the default).
        """
        return self._properties.get('location')
    @location.setter
    def location(self, value):
        """Update location in which the dataset is hosted.
        :type value: str
        :param value: (Optional) new location
        :raises: ValueError for invalid value types.
        """
        if not isinstance(value, six.string_types) and value is not None:
            raise ValueError("Pass a string, or None")
        self._properties['location'] = value
    @classmethod
    def from_api_repr(cls, resource, client):
        """Factory: construct a dataset given its API representation
        :type resource: dict
        :param resource: dataset resource representation returned from the API
        :type client: :class:`google.cloud.bigquery.client.Client`
        :param client: Client which holds credentials and project
                       configuration for the dataset.
        :rtype: :class:`google.cloud.bigquery.dataset.Dataset`
        :returns: Dataset parsed from ``resource``.
        :raises: :class:`KeyError` if the resource lacks the dataset ID.
        """
        if ('datasetReference' not in resource or
                'datasetId' not in resource['datasetReference']):
            raise KeyError('Resource lacks required identity information:'
                           '["datasetReference"]["datasetId"]')
        name = resource['datasetReference']['datasetId']
        dataset = cls(name, client=client)
        # copy the remaining server-side fields onto the new instance
        dataset._set_properties(resource)
        return dataset
    def _require_client(self, client):
        """Check client or verify over-ride.
        :type client: :class:`~google.cloud.bigquery.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.
        :rtype: :class:`google.cloud.bigquery.client.Client`
        :returns: The client passed in or the currently bound client.
        """
        if client is None:
            client = self._client
        return client
    @staticmethod
    def _parse_access_grants(access):
        """Parse a resource fragment into a set of access grants.
        ``role`` augments the entity type and present **unless** the entity
        type is ``view``.
        :type access: list of mappings
        :param access: each mapping represents a single access grant.
        :rtype: list of :class:`AccessGrant`
        :returns: a list of parsed grants.
        :raises: :class:`ValueError` if a grant in ``access`` has more keys
                 than ``role`` and one additional key.
        """
        result = []
        for grant in access:
            # work on a copy so popping keys does not mutate the input
            grant = grant.copy()
            role = grant.pop('role', None)
            # the one remaining key/value pair is (entity_type, entity_id)
            entity_type, entity_id = grant.popitem()
            if len(grant) != 0:
                raise ValueError('Grant has unexpected keys remaining.', grant)
            result.append(
                AccessGrant(role, entity_type, entity_id))
        return result
    def _set_properties(self, api_response):
        """Update properties from resource in body of ``api_response``
        :type api_response: httplib2.Response
        :param api_response: response returned from an API call.
        """
        self._properties.clear()
        cleaned = api_response.copy()
        # 'access' is handled via the access_grants property, not _properties
        access = cleaned.pop('access', ())
        self.access_grants = self._parse_access_grants(access)
        # the API returns numeric fields as strings; coerce them here
        if 'creationTime' in cleaned:
            cleaned['creationTime'] = float(cleaned['creationTime'])
        if 'lastModifiedTime' in cleaned:
            cleaned['lastModifiedTime'] = float(cleaned['lastModifiedTime'])
        if 'defaultTableExpirationMs' in cleaned:
            cleaned['defaultTableExpirationMs'] = int(
                cleaned['defaultTableExpirationMs'])
        self._properties.update(cleaned)
    def _build_access_resource(self):
        """Generate a resource fragment for dataset's access grants."""
        result = []
        for grant in self.access_grants:
            info = {grant.entity_type: grant.entity_id}
            # views carry no role (see AccessGrant)
            if grant.role is not None:
                info['role'] = grant.role
            result.append(info)
        return result
    def _build_resource(self):
        """Generate a resource for ``create`` or ``update``."""
        resource = {
            'datasetReference': {
                'projectId': self.project, 'datasetId': self.name},
        }
        # only include optional fields the user has actually set
        if self.default_table_expiration_ms is not None:
            value = self.default_table_expiration_ms
            resource['defaultTableExpirationMs'] = value
        if self.description is not None:
            resource['description'] = self.description
        if self.friendly_name is not None:
            resource['friendlyName'] = self.friendly_name
        if self.location is not None:
            resource['location'] = self.location
        if len(self.access_grants) > 0:
            resource['access'] = self._build_access_resource()
        return resource
    def create(self, client=None):
        """API call: create the dataset via a POST request.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/datasets/insert

        :type client: :class:`~google.cloud.bigquery.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.
        """
        client = self._require_client(client)
        # Insert is a POST against the project's datasets collection.
        path = '/projects/%s/datasets' % (self.project,)
        api_response = client._connection.api_request(
            method='POST', path=path, data=self._build_resource())
        # Refresh local state from the server's response.
        self._set_properties(api_response)
def exists(self, client=None):
"""API call: test for the existence of the dataset via a GET request
See
https://cloud.google.com/bigquery/docs/reference/v2/datasets/get
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:rtype: bool
:returns: Boolean indicating existence of the dataset.
"""
client = self._require_client(client)
try:
client._connection.api_request(method='GET', path=self.path,
query_params={'fields': 'id'})
except NotFound:
return False
else:
return True
    def reload(self, client=None):
        """API call: refresh dataset properties via a GET request.

        See
        https://cloud.google.com/bigquery/docs/reference/v2/datasets/get

        :type client: :class:`~google.cloud.bigquery.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.
        """
        client = self._require_client(client)
        api_response = client._connection.api_request(
            method='GET', path=self.path)
        # Overwrite local properties with the server's current view.
        self._set_properties(api_response)
def patch(self, client=None, **kw):
"""API call: update individual dataset properties via a PATCH request.
See
https://cloud.google.com/bigquery/docs/reference/v2/datasets/patch
:type client: :class:`~google.cloud.bigquery.client.Client` or
``NoneType``
:param client: the client to use. If not passed, falls back to the
``client`` stored on the current dataset.
:type kw: ``dict``
:param kw: properties to be patched.
:raises: ValueError for invalid value types.
"""
client = self._require_client(client)
partial = {}
if 'default_table_expiration_ms' in kw:
value = kw['default_table_expiration_ms']
if not isinstance(value, six.integer_types) and value is not None:
raise ValueError("Pass an integer, or None")
partial['defaultTableExpirationMs'] = value
if 'description' in kw:
partial['description'] = kw['description']
if 'friendly_name' in kw:
partial['friendlyName'] = kw['friendly_name']
if 'location' in kw:
partial['location'] = kw['location']
api_response = client._connection.api_request(
method='PATCH', path=self.path, data=partial)
self._set_properties(api_response)
    def update(self, client=None):
        """API call: update dataset properties via a PUT request.

        See
        https://cloud.google.com/bigquery/docs/reference/v2/datasets/update

        :type client: :class:`~google.cloud.bigquery.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.
        """
        client = self._require_client(client)
        # PUT sends the complete locally-built resource representation.
        api_response = client._connection.api_request(
            method='PUT', path=self.path, data=self._build_resource())
        self._set_properties(api_response)
    def delete(self, client=None):
        """API call: delete the dataset via a DELETE request.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/datasets/delete

        :type client: :class:`~google.cloud.bigquery.client.Client` or
                      ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.
        """
        client = self._require_client(client)
        client._connection.api_request(method='DELETE', path=self.path)
    def list_tables(self, max_results=None, page_token=None):
        """List tables contained in this dataset.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/tables/list

        :type max_results: int
        :param max_results: (Optional) Maximum number of tables to return.
                            If not passed, defaults to a value set by the API.

        :type page_token: str
        :param page_token: (Optional) Opaque marker for the next "page" of
                           tables.  If not passed, the API will return the
                           first page of tables.

        :rtype: :class:`~google.cloud.iterator.Iterator`
        :returns: Iterator of :class:`~google.cloud.bigquery.table.Table`
                  contained within the current dataset.
        """
        path = '/projects/%s/datasets/%s/tables' % (self.project, self.name)
        result = HTTPIterator(client=self._client, path=path,
                              item_to_value=_item_to_table, items_key='tables',
                              page_token=page_token, max_results=max_results)
        # Give ``_item_to_table`` access to the owning dataset.
        result.dataset = self
        return result
    def table(self, name, schema=()):
        """Construct a table bound to this dataset.

        No API request is made; this only builds the local object.

        :type name: str
        :param name: Name of the table.

        :type schema: list of :class:`google.cloud.bigquery.table.SchemaField`
        :param schema: The table's schema.  Defaults to an empty tuple
                      (no fields defined yet).

        :rtype: :class:`google.cloud.bigquery.table.Table`
        :returns: a new ``Table`` instance
        """
        return Table(name, dataset=self, schema=schema)
def _item_to_table(iterator, resource):
    """Convert a JSON table resource to the native object.

    :type iterator: :class:`~google.cloud.iterator.Iterator`
    :param iterator: The iterator that is currently in use.  Its ``dataset``
                     attribute must have been set (``list_tables`` does this).

    :type resource: dict
    :param resource: An item to be converted to a table.

    :rtype: :class:`~google.cloud.bigquery.table.Table`
    :returns: The next table in the page.
    """
    return Table.from_api_repr(resource, iterator.dataset)
| apache-2.0 |
craig5/salt-essentials-utils | example-data/extend-1/15-ext-pillar/file-base/_states/custom.py | 9 | 1906 | import os
def enforce_tmp(name, contents=None):
    """
    Enforce a temp file has the desired contents.

    name
        The name of the file to change. (Under '/tmp'.)
    contents
        The value you will be storing.

    Returns the standard Salt state dict with ``name``, ``changes``,
    ``result`` and ``comment`` keys.  In test mode (``__opts__['test']``)
    nothing is written and ``result`` is ``None``.
    """
    # NOTE(review): ``contents is None`` (the default) raises TypeError when
    # a write is needed -- callers must always pass ``contents``; confirm.
    return_dict = {
        'name': name,
        'changes': {},
        'result': False,
        'comment': ''
    }
    tmp_file = os.path.join('/tmp', name)
    # Determine current state: does the file exist, and does its content
    # (minus trailing newlines) already match the desired value?
    file_ok = os.path.isfile(tmp_file)
    file_contents = None
    content_ok = False
    if file_ok:
        with open(tmp_file, 'r') as fp:
            file_contents = fp.read().rstrip('\n')
        content_ok = (file_contents == contents)
    comments = ""
    if file_ok:
        comments += 'File exists ({0})\n'.format(tmp_file)
    else:
        comments += 'File created ({0})\n'.format(tmp_file)
    if content_ok:
        comments += 'Contents correct ({0})\n'.format(file_contents)
    else:
        comments += 'Contents updated ({0})\n'.format(contents)
    return_dict['comment'] = comments
    # Build the changes entry once so test runs and real runs report the
    # same 'new' value (previously the real run appended a trailing newline
    # to ``contents`` *before* recording it, making the reports disagree).
    changes = {
        'contents': {
            'old': file_contents,
            'new': contents
        }
    }
    # Test run: report what would change without touching the file.
    if __opts__['test']:
        return_dict['result'] = None
        return_dict['changes'] = changes if not content_ok else {}
        return return_dict
    if not content_ok:
        with open(tmp_file, 'w') as fp:
            # Write without mutating ``contents``.
            fp.write(contents + "\n")
        return_dict['changes'] = changes
    return_dict['result'] = True
    return return_dict
| apache-2.0 |
grengojbo/grappelli-admin-tools-trunk | test_proj/test_app/tests.py | 2 | 1277 | from django.test import TestCase
class AdminBasicTest(TestCase):
    """Smoke tests for the admin index and app index pages.

    Uses two accounts, 'staff' and 'superuser' (both password '123') --
    presumably provided by test fixtures; confirm against the project setup.
    """

    def test_admin_loads(self):
        # A staff user can log in and the admin index renders.
        self.assertTrue(self.client.login(username='staff', password='123'))
        res = self.client.get('/admin/')
        self.assertEqual(res.status_code, 200)

    def test_permissions(self):
        # Staff sees only the models it has permission for...
        self.assertTrue(self.client.login(username='staff', password='123'))
        res = self.client.get('/admin/')
        self.assertContains(res, 'Foos')
        self.assertNotContains(res, 'Bars')
        self.assertNotContains(res, 'Users')
        # ...while a superuser also sees Users.
        self.assertTrue(self.client.login(username='superuser', password='123'))
        res = self.client.get('/admin/')
        self.assertContains(res, 'Users', 2)  # menu and dashboard items

    def test_app_index(self):
        # Per-app index obeys the same permission filtering.
        self.client.login(username='staff', password='123')
        res = self.client.get('/admin/test_app/')
        self.assertEqual(res.status_code, 200)
        self.assertContains(res, 'Foos')
        self.assertNotContains(res, 'Bars')
        self.client.login(username='superuser', password='123')
        res = self.client.get('/admin/test_app/')
        self.assertContains(res, 'Foos')
        self.assertContains(res, 'Bars')
        self.assertContains(res, 'Users', 1)  # only item from menu
| mit |
itgb/opCloudRouter | qca/src/linux/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <fweisbec@gmail.com>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
    """Main window: draws scheduler trace rectangles in a scrollable,
    zoomable wx panel.  Horizontal axis is time (microseconds mapped to
    pixels via ``zoom``); each CPU/task row is one rectangle band.

    NOTE(review): this file is Python 2 (module header uses ``raise E, msg``);
    the ``/`` divisions below rely on integer division semantics.
    """

    Y_OFFSET = 100           # pixels above the first rectangle band
    RECT_HEIGHT = 100        # height of one rectangle band
    RECT_SPACE = 50          # vertical gap between bands
    EVENT_MARKING_WIDTH = 5  # height of the event-marker strip atop a band

    def __init__(self, sched_tracer, title, parent = None, id = -1):
        wx.Frame.__init__(self, parent, id, title)

        (self.screen_width, self.screen_height) = wx.GetDisplaySize()
        self.screen_width -= 10
        self.screen_height -= 10
        self.zoom = 0.5           # pixels per nanosecond-ish scale factor
        self.scroll_scale = 20    # scroll units -> pixels
        self.sched_tracer = sched_tracer
        self.sched_tracer.set_root_win(self)
        (self.ts_start, self.ts_end) = sched_tracer.interval()
        self.update_width_virtual()
        self.nr_rects = sched_tracer.nr_rectangles() + 1
        self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))

        # whole window panel
        self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))

        # scrollable container
        self.scroll = wx.ScrolledWindow(self.panel)
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
        self.scroll.EnableScrolling(True, True)
        self.scroll.SetFocus()

        # scrollable drawing area
        self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
        self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        # Bind the same handlers on the container so events reach us
        # regardless of which window has focus.
        self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
        self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
        self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)

        self.scroll.Fit()
        self.Fit()

        self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)

        self.txt = None  # summary StaticText, created lazily

        self.Show(True)

    def us_to_px(self, val):
        # Convert a time delta (us) to pixels at the current zoom.
        return val / (10 ** 3) * self.zoom

    def px_to_us(self, val):
        # Inverse of us_to_px.
        return (val / self.zoom) * (10 ** 3)

    def scroll_start(self):
        # Current scroll origin in pixels.
        (x, y) = self.scroll.GetViewStart()
        return (x * self.scroll_scale, y * self.scroll_scale)

    def scroll_start_us(self):
        # Current horizontal scroll origin in microseconds.
        (x, y) = self.scroll_start()
        return self.px_to_us(x)

    def paint_rectangle_zone(self, nr, color, top_color, start, end):
        """Draw band ``nr`` over [start, end] (absolute us); ``top_color``,
        when given, paints the thin event-marker strip above the band."""
        offset_px = self.us_to_px(start - self.ts_start)
        width_px = self.us_to_px(end - self.ts_start)

        offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
        width_py = RootFrame.RECT_HEIGHT

        dc = self.dc

        if top_color is not None:
            (r, g, b) = top_color
            top_color = wx.Colour(r, g, b)
            brush = wx.Brush(top_color, wx.SOLID)
            dc.SetBrush(brush)
            dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
            # Shrink the main band so the marker strip is not overdrawn.
            width_py -= RootFrame.EVENT_MARKING_WIDTH
            offset_py += RootFrame.EVENT_MARKING_WIDTH

        (r, g, b) = color
        color = wx.Colour(r, g, b)
        brush = wx.Brush(color, wx.SOLID)
        dc.SetBrush(brush)
        dc.DrawRectangle(offset_px, offset_py, width_px, width_py)

    def update_rectangles(self, dc, start, end):
        # Delegate actual drawing to the tracer for the visible time zone.
        start += self.ts_start
        end += self.ts_start
        self.sched_tracer.fill_zone(start, end)

    def on_paint(self, event):
        dc = wx.PaintDC(self.scroll_panel)
        self.dc = dc

        # Repaint only the currently visible time window.
        width = min(self.width_virtual, self.screen_width)
        (x, y) = self.scroll_start()
        start = self.px_to_us(x)
        end = self.px_to_us(x + width)
        self.update_rectangles(dc, start, end)

    def rect_from_ypixel(self, y):
        # Map a y pixel to a band index; -1 when the click falls outside
        # any band (above the first, past the last, or in the gap).
        y -= RootFrame.Y_OFFSET
        rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
        height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)

        if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
            return -1

        return rect

    def update_summary(self, txt):
        # Replace the previous summary text widget, if any.
        if self.txt:
            self.txt.Destroy()
        self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))

    def on_mouse_down(self, event):
        (x, y) = event.GetPositionTuple()
        rect = self.rect_from_ypixel(y)
        if rect == -1:
            return

        t = self.px_to_us(x) + self.ts_start

        self.sched_tracer.mouse_down(rect, t)

    def update_width_virtual(self):
        self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)

    def __zoom(self, x):
        # Re-anchor the scrollbars so time ``x`` stays at the view origin.
        self.update_width_virtual()
        (xpos, ypos) = self.scroll.GetViewStart()
        xpos = self.us_to_px(x) / self.scroll_scale
        self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
        self.Refresh()

    def zoom_in(self):
        x = self.scroll_start_us()
        self.zoom *= 2
        self.__zoom(x)

    def zoom_out(self):
        x = self.scroll_start_us()
        self.zoom /= 2
        self.__zoom(x)

    def on_key_press(self, event):
        # '+' / '-' zoom; arrow keys scroll by one scroll unit.
        key = event.GetRawKeyCode()
        if key == ord("+"):
            self.zoom_in()
            return
        if key == ord("-"):
            self.zoom_out()
            return

        key = event.GetKeyCode()
        (x, y) = self.scroll.GetViewStart()
        if key == wx.WXK_RIGHT:
            self.scroll.Scroll(x + 1, y)
        elif key == wx.WXK_LEFT:
            self.scroll.Scroll(x - 1, y)
        elif key == wx.WXK_DOWN:
            self.scroll.Scroll(x, y + 1)
        elif key == wx.WXK_UP:
            self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
class NodeVisitor(object):
    """Dispatches ``visit`` calls to per-node-class handler methods."""

    def visit(self, node):
        # Dispatch by class name: a node of class Foo is handled by a
        # ``visit_Foo`` attribute on this visitor.  There is intentionally
        # no generic fallback, so an unhandled type raises AttributeError.
        handler = getattr(self, "visit_" + node.__class__.__name__)
        return handler(node)
class Node(object):
    """Base AST node: a ``data`` payload plus an ordered list of children."""

    def __init__(self, data=None):
        self.data = data
        self.parent = None
        self.children = []

    def append(self, other):
        """Add *other* as the last child and set its parent to ``self``."""
        other.parent = self
        self.children.append(other)

    def remove(self):
        """Detach this node from its parent's child list."""
        self.parent.children.remove(self)

    def __repr__(self):
        return "<%s %s>" % (self.__class__.__name__, self.data)

    def __str__(self):
        # Render the subtree, indenting each child's lines.
        rv = [repr(self)]
        for item in self.children:
            rv.extend("  %s" % line for line in str(item).split("\n"))
        return "\n".join(rv)

    def __eq__(self, other):
        # Structural equality: same class, same data, children pairwise equal.
        if not (self.__class__ == other.__class__ and
                self.data == other.data and
                len(self.children) == len(other.children)):
            return False
        for child, other_child in zip(self.children, other.children):
            if not child == other_child:
                return False
        return True

    def __ne__(self, other):
        # Python 2 does not derive ``!=`` from ``__eq__``; without this,
        # two structurally equal nodes would still compare unequal via
        # ``!=`` (identity comparison).
        return not self == other

    def copy(self):
        """Return a deep copy of this node's subtree (parent left unset)."""
        new = self.__class__(self.data)
        for item in self.children:
            new.append(item.copy())
        return new
class DataNode(Node):
    """Section node whose child DataNodes always come after other children."""

    def append(self, other):
        # Append that retains the invariant that child data nodes
        # come after child nodes of other types.
        other.parent = self
        if isinstance(other, DataNode):
            self.children.append(other)
        else:
            # Insert just before the trailing run of DataNode children.
            index = len(self.children)
            while index > 0 and isinstance(self.children[index - 1], DataNode):
                index -= 1
            # Non-data siblings must have unique data (key) values.
            # ``range`` (not the Python-2-only ``xrange``) keeps this
            # working on both Python 2 and 3.
            for i in range(index):
                assert other.data != self.children[i].data
            self.children.insert(index, other)
class KeyValueNode(Node):
    """Key node whose unconditional ValueNode child, if any, stays last."""

    def append(self, other):
        # Maintain the invariant that conditional children precede the
        # single unconditional (ValueNode) child.
        other.parent = self
        if isinstance(other, ValueNode):
            # At most one unconditional value is allowed; it goes last.
            if self.children:
                assert not isinstance(self.children[-1], ValueNode)
            self.children.append(other)
            return
        if self.children and isinstance(self.children[-1], ValueNode):
            # Slot the conditional in just before the unconditional value.
            self.children.insert(len(self.children) - 1, other)
        else:
            self.children.append(other)
class ListNode(Node):
    def append(self, other):
        # Plain ordered append; list items impose no ordering invariant.
        other.parent = self
        self.children.append(other)
class ValueNode(Node):
    # Leaf node: values may not have children.
    def append(self, other):
        raise TypeError
class AtomNode(ValueNode):
    # Distinguishes atom values from plain values; no extra behavior.
    pass
class ConditionalNode(Node):
    # Marker class for a conditional branch; child structure is set by
    # the parser, not enforced here.
    pass
class UnaryExpressionNode(Node):
    # Expression with exactly two children: [operator, operand].
    def __init__(self, operator, operand):
        Node.__init__(self)
        self.append(operator)
        self.append(operand)

    def append(self, other):
        Node.append(self, other)
        # Guard the operator/operand pair invariant.
        assert len(self.children) <= 2

    def copy(self):
        # Rebuild via the two-argument constructor; the inherited
        # Node.copy would call __class__(self.data) with one argument.
        new = self.__class__(self.children[0].copy(),
                             self.children[1].copy())
        return new
class BinaryExpressionNode(Node):
    # Expression with exactly three children: [operator, lhs, rhs].
    def __init__(self, operator, operand_0, operand_1):
        Node.__init__(self)
        self.append(operator)
        self.append(operand_0)
        self.append(operand_1)

    def append(self, other):
        Node.append(self, other)
        # Guard the operator/operand/operand triple invariant.
        assert len(self.children) <= 3

    def copy(self):
        # Rebuild via the three-argument constructor; the inherited
        # Node.copy would call __class__(self.data) with one argument.
        new = self.__class__(self.children[0].copy(),
                             self.children[1].copy(),
                             self.children[2].copy())
        return new
class UnaryOperatorNode(Node):
    # Leaf operator token; children are disallowed.
    def append(self, other):
        raise TypeError
class BinaryOperatorNode(Node):
    # Leaf operator token; children are disallowed.
    def append(self, other):
        raise TypeError
class IndexNode(Node):
    # Marker class; inherits all behavior from Node.
    pass
class VariableNode(Node):
    # Marker class; inherits all behavior from Node.
    pass
class StringNode(Node):
    # Unlike NumberNode, derives from Node, so it may carry children.
    pass
class NumberNode(ValueNode):
    # Numeric value; like all ValueNodes it cannot have children.
    pass
| mpl-2.0 |
SunghanKim/numpy | numpy/ma/__init__.py | 64 | 1455 | """
=============
Masked Arrays
=============
Arrays sometimes contain invalid or missing data. When doing operations
on such arrays, we wish to suppress invalid values, which is the purpose masked
arrays fulfill (an example of typical use is given below).
For example, examine the following array:
>>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
When we try to calculate the mean of the data, the result is undetermined:
>>> np.mean(x)
nan
The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
masked arrays:
>>> m = np.ma.masked_array(x, np.isnan(x))
>>> m
masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
mask = [False False False True False False False True],
fill_value=1e+20)
Here, we construct a masked array that suppress all ``NaN`` values. We
may now proceed to calculate the mean of the other values:
>>> np.mean(m)
2.6666666666666665
.. [1] Not-a-Number, a floating point value that is the result of an
invalid operation.
.. moduleauthor:: Pierre Gerard-Marchant
.. moduleauthor:: Jarrod Millman
"""
from __future__ import division, absolute_import, print_function
from . import core
from .core import *
from . import extras
from .extras import *
__all__ = ['core', 'extras']
__all__ += core.__all__    # re-export the public names of numpy.ma.core
__all__ += extras.__all__  # ...and of numpy.ma.extras
from numpy.testing import Tester
test = Tester().test    # enables ``np.ma.test()``
bench = Tester().bench  # enables ``np.ma.bench()``
| bsd-3-clause |
karan1276/servo | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1/reference/support/generate-text-emphasis-line-height-tests.py | 829 | 3431 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-line-height-001 ~ 004 except
001z. They test the line height expansion in different directions. This
script outputs a list of all tests it generated in the format of Mozilla
reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-line-height-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis line height, {pos}, {wm}, {tag}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-position-property">
<meta name="assert" content="text emphasis marks should expand the line height like ruby if necessary">
<link rel="match" href="text-emphasis-line-height-{index:03}-ref.html">
<p>Pass if the emphasis marks are {dir} the black line:</p>
{start}試験テスト{end}
'''
REF_FILE = 'text-emphasis-line-height-{:03}-ref.html'
REF_TEMPLATE='''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis line height, {pos}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if the emphasis marks are {dir} the black line:</p>
<div style="line-height: 1; border-{pos}: 1px solid black; writing-mode: {wm}; ruby-position: {posval}"><ruby>試<rt>●</rt>験<rt>●</rt>テ<rt>●</rt>ス<rt>●</rt>ト<rt>●</rt></ruby></div>
'''
STYLE1 = 'line-height: 1; border-{pos}: 1px solid black; ' + \
'writing-mode: {wm}; text-emphasis-position: {posval};'
STYLE2 = 'text-emphasis: circle;'
TAGS = [
# (tag, start, end)
('div', '<div style="{style1}{style2}">', '</div>'),
('span', '<div style="{style1}"><span style="{style2}">', '</span></div>'),
]
POSITIONS = [
# pos, text-emphasis-position, ruby-position,
# writing-modes, dir text
('top', 'over right', 'over',
['horizontal-tb'], 'below'),
('bottom', 'under right', 'under',
['horizontal-tb'], 'over'),
('right', 'over right', 'over',
['vertical-rl', 'vertical-lr'], 'to the left of'),
('left', 'over left', 'under',
['vertical-rl', 'vertical-lr'], 'to the right of'),
]
import string
def write_file(filename, content):
    """Write *content* to *filename*, UTF-8 encoded."""
    encoded = content.encode('UTF-8')
    with open(filename, 'wb') as out:
        out.write(encoded)
# Emit a reftest manifest on stdout while generating the HTML files, so the
# output can be pasted into a Mozilla-style reftest.list.
print("# START tests from {}".format(__file__))
idx = 0
for (pos, emphasis_pos, ruby_pos, wms, dir) in POSITIONS:
    idx += 1
    # One shared reference file per emphasis position, rendered with ruby.
    ref_file = REF_FILE.format(idx)
    content = REF_TEMPLATE.format(pos=pos, dir=dir, wm=wms[0], posval=ruby_pos)
    write_file(ref_file, content)
    # Suffix letters (a, b, c, ...) distinguish test variants per position.
    suffix = iter(string.ascii_lowercase)
    for wm in wms:
        style1 = STYLE1.format(pos=pos, wm=wm, posval=emphasis_pos)
        for (tag, start, end) in TAGS:
            test_file = TEST_FILE.format(idx, next(suffix))
            content = TEST_TEMPLATE.format(
                pos=pos, wm=wm, tag=tag, index=idx, dir=dir,
                start=start.format(style1=style1, style2=STYLE2), end=end)
            write_file(test_file, content)
            print("== {} {}".format(test_file, ref_file))
print("# END tests from {}".format(__file__))
| mpl-2.0 |
inmomentsoftware/teams | wtforms/ext/sqlalchemy/fields.py | 177 | 6848 | """
Useful form fields for use with SQLAlchemy ORM.
"""
from __future__ import unicode_literals
import operator
from wtforms import widgets
from wtforms.compat import text_type, string_types
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
try:
from sqlalchemy.orm.util import identity_key
has_identity_key = True
except ImportError:
has_identity_key = False
__all__ = (
'QuerySelectField', 'QuerySelectMultipleField',
)
class QuerySelectField(SelectFieldBase):
    """
    Will display a select drop-down field to choose between ORM results in a
    sqlalchemy `Query`. The `data` property actually will store/keep an ORM
    model instance, not the ID. Submitting a choice which is not in the query
    will result in a validation error.

    This field only works for queries on models whose primary key column(s)
    have a consistent string representation. This means it mostly only works
    for those composed of string, unicode, and integer types. For the most
    part, the primary keys will be auto-detected from the model, alternately
    pass a one-argument callable to `get_pk` which can return a unique
    comparable key.

    The `query` property on the field can be set from within a view to assign
    a query per-instance to the field. If the property is not set, the
    `query_factory` callable passed to the field constructor will be called to
    obtain a query.

    Specify `get_label` to customize the label associated with each option. If
    a string, this is the name of an attribute on the model object to use as
    the label text. If a one-argument callable, this callable will be passed
    model instance and expected to return the label text. Otherwise, the model
    object's `__str__` or `__unicode__` will be used.

    If `allow_blank` is set to `True`, then a blank choice will be added to the
    top of the list. Selecting this choice will result in the `data` property
    being `None`. The label for this blank choice can be set by specifying the
    `blank_text` parameter.
    """
    widget = widgets.Select()

    def __init__(self, label=None, validators=None, query_factory=None,
                 get_pk=None, get_label=None, allow_blank=False,
                 blank_text='', **kwargs):
        super(QuerySelectField, self).__init__(label, validators, **kwargs)
        self.query_factory = query_factory

        # Default primary-key extraction relies on SQLAlchemy's identity_key.
        if get_pk is None:
            if not has_identity_key:
                raise Exception('The sqlalchemy identity_key function could not be imported.')
            self.get_pk = get_pk_from_identity
        else:
            self.get_pk = get_pk

        # ``get_label`` may be omitted (use the object itself), an attribute
        # name, or a one-argument callable.
        if get_label is None:
            self.get_label = lambda x: x
        elif isinstance(get_label, string_types):
            self.get_label = operator.attrgetter(get_label)
        else:
            self.get_label = get_label

        self.allow_blank = allow_blank
        self.blank_text = blank_text
        self.query = None
        # Lazily-built per-request cache of (pk-string, object) pairs.
        self._object_list = None

    def _get_data(self):
        # Resolve deferred form data (a pk string) to the actual ORM object.
        if self._formdata is not None:
            for pk, obj in self._get_object_list():
                if pk == self._formdata:
                    self._set_data(obj)
                    break
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def _get_object_list(self):
        # Build and cache the (pk-string, object) pairs for this field.
        if self._object_list is None:
            query = self.query or self.query_factory()
            get_pk = self.get_pk
            self._object_list = list((text_type(get_pk(obj)), obj) for obj in query)
        return self._object_list

    def iter_choices(self):
        # '__None' is the sentinel submitted value for the blank choice.
        if self.allow_blank:
            yield ('__None', self.blank_text, self.data is None)

        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), obj == self.data)

    def process_formdata(self, valuelist):
        if valuelist:
            if self.allow_blank and valuelist[0] == '__None':
                self.data = None
            else:
                # Defer the object lookup until ``data`` is accessed.
                self._data = None
                self._formdata = valuelist[0]

    def pre_validate(self, form):
        # Reject any submitted value not present in the query results.
        data = self.data
        if data is not None:
            for pk, obj in self._get_object_list():
                if data == obj:
                    break
            else:
                raise ValidationError(self.gettext('Not a valid choice'))
        elif self._formdata or not self.allow_blank:
            raise ValidationError(self.gettext('Not a valid choice'))
class QuerySelectMultipleField(QuerySelectField):
    """
    Very similar to QuerySelectField with the difference that this will
    display a multiple select. The data property will hold a list with ORM
    model instances and will be an empty list when no value is selected.

    If any of the items in the data list or submitted form data cannot be
    found in the query, this will result in a validation error.
    """
    widget = widgets.Select(multiple=True)

    def __init__(self, label=None, validators=None, default=None, **kwargs):
        # Avoid a shared mutable default; each instance gets its own list.
        if default is None:
            default = []

        super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs)
        if kwargs.get('allow_blank', False):
            import warnings
            warnings.warn('allow_blank=True does not do anything for QuerySelectMultipleField.')
        self._invalid_formdata = False

    def _get_data(self):
        formdata = self._formdata
        if formdata is not None:
            data = []
            # Match submitted pks against the query, preserving query order.
            for pk, obj in self._get_object_list():
                if not formdata:
                    break
                elif pk in formdata:
                    formdata.remove(pk)
                    data.append(obj)
            if formdata:
                # Leftover pks were not found in the query; flag for
                # pre_validate to reject.
                self._invalid_formdata = True
            self._set_data(data)
        return self._data

    def _set_data(self, data):
        self._data = data
        self._formdata = None

    data = property(_get_data, _set_data)

    def iter_choices(self):
        for pk, obj in self._get_object_list():
            yield (pk, self.get_label(obj), obj in self.data)

    def process_formdata(self, valuelist):
        # Stored as a set; consumed destructively in _get_data.
        self._formdata = set(valuelist)

    def pre_validate(self, form):
        if self._invalid_formdata:
            raise ValidationError(self.gettext('Not a valid choice'))
        elif self.data:
            obj_list = list(x[1] for x in self._get_object_list())
            for v in self.data:
                if v not in obj_list:
                    raise ValidationError(self.gettext('Not a valid choice'))
def get_pk_from_identity(obj):
    """Build a string key for *obj* by joining its SQLAlchemy
    identity-key values with ':'."""
    cls, key = identity_key(instance=obj)
    return ':'.join(map(text_type, key))
| agpl-3.0 |
fuzeman/plex.metadata.py | tests/guid/test_legacy.py | 1 | 1421 | import logging
logging.basicConfig(level=logging.DEBUG)
from plex_metadata import Guid
def test_tvdb_show():
    # A bare show GUID parses with no season/episode components.
    guids = [
        'com.plexapp.agents.thetvdb://12345?lang=en'
    ]

    for item in guids:
        r = Guid.parse(item)

        assert r.agent == 'tvdb'
        assert r.sid == '12345'
        assert r.season is None
        assert r.episode is None
def test_tvdb_episode():
    # Every tvdb-compatible agent form maps to agent 'tvdb' with
    # season/episode parsed as integers.
    guids = [
        'com.plexapp.agents.abstvdb://12345/13/52?lang=en',
        'com.plexapp.agents.thetvdb://12345/13/52?lang=en',
        'com.plexapp.agents.thetvdbdvdorder://12345/13/52?lang=en',
        'com.plexapp.agents.xbmcnfotv://12345/13/52?lang=en',
        'com.plexapp.agents.mcm://MCM_TV_A_12345/13/52?lang=en'
    ]

    for item in guids:
        r = Guid.parse(item)

        assert r.agent == 'tvdb'
        assert r.sid == '12345'
        assert r.season == 13
        assert r.episode == 52
def test_imdb():
    # 'tt'-prefixed ids resolve to the imdb agent.
    guids = [
        'com.plexapp.agents.imdb://tt12345',
        'com.plexapp.agents.xbmcnfotv://tt12345'
    ]

    for item in guids:
        r = Guid.parse(item)

        assert r.agent == 'imdb'
        assert r.sid == 'tt12345'
def test_tmdb():
    # Numeric movie ids resolve to the tmdb agent.
    guids = [
        'com.plexapp.agents.standalone://12345',
        'com.plexapp.agents.themoviedb://12345'
    ]

    for item in guids:
        r = Guid.parse(item)

        assert r.agent == 'tmdb'
        assert r.sid == '12345'
| mit |
MrLoick/python-for-android | python-build/python-libs/gdata/tests/atom_tests/client_test.py | 87 | 8010 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# This test may make an actual HTTP request.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import atom.http_core
import atom.auth
import atom.client
import atom.mock_http_core
class AtomPubClientEchoTest(unittest.TestCase):
def test_simple_request_with_no_client_defaults(self):
client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
self.assert_(client.host is None)
self.assert_(client.auth_token is None)
# Make several equivalent requests.
responses = [client.request('GET', 'http://example.org/'),
client.request(http_request=atom.http_core.HttpRequest(
uri=atom.http_core.Uri('http', 'example.org', path='/'),
method='GET')),
client.request('GET',
http_request=atom.http_core.HttpRequest(
uri=atom.http_core.Uri('http', 'example.org',
path='/')))]
for response in responses:
self.assert_(response.getheader('Echo-Host') == 'example.org:None')
self.assert_(response.getheader('Echo-Uri') == '/')
self.assert_(response.getheader('Echo-Scheme') == 'http')
self.assert_(response.getheader('Echo-Method') == 'GET')
self.assertTrue(response.getheader('User-Agent').startswith('gdata-py/'))
def test_auth_request_with_no_client_defaults(self):
client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
token = atom.auth.BasicAuth('Jeff', '123')
response = client.request('POST', 'https://example.net:8080/',
auth_token=token)
self.assert_(response.getheader('Echo-Host') == 'example.net:8080')
self.assert_(response.getheader('Echo-Uri') == '/')
self.assert_(response.getheader('Echo-Scheme') == 'https')
self.assert_(response.getheader('Authorization') == 'Basic SmVmZjoxMjM=')
self.assert_(response.getheader('Echo-Method') == 'POST')
def test_request_with_client_defaults(self):
client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient(),
'example.com', atom.auth.BasicAuth('Jeff', '123'))
self.assert_(client.host == 'example.com')
self.assert_(client.auth_token is not None)
self.assert_(client.auth_token.basic_cookie == 'SmVmZjoxMjM=')
response = client.request('GET', 'http://example.org/')
self.assert_(response.getheader('Echo-Host') == 'example.org:None')
self.assert_(response.getheader('Echo-Uri') == '/')
self.assert_(response.getheader('Echo-Scheme') == 'http')
self.assert_(response.getheader('Echo-Method') == 'GET')
self.assert_(response.getheader('Authorization') == 'Basic SmVmZjoxMjM=')
response = client.request('GET', '/')
self.assert_(response.getheader('Echo-Host') == 'example.com:None')
self.assert_(response.getheader('Echo-Uri') == '/')
self.assert_(response.getheader('Echo-Scheme') == 'http')
self.assert_(response.getheader('Authorization') == 'Basic SmVmZjoxMjM=')
response = client.request('GET', '/',
http_request=atom.http_core.HttpRequest(
uri=atom.http_core.Uri(port=99)))
self.assert_(response.getheader('Echo-Host') == 'example.com:99')
self.assert_(response.getheader('Echo-Uri') == '/')
def test_get(self):
    """Both client.get and its CamelCase alias Get issue GET requests."""
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())
    response = client.get('http://example.com/simple')
    # assertEqual replaces the deprecated assert_(a == b) idiom.
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    response = client.Get(uri='http://example.com/simple2')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple2')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
def test_modify_request_using_args(self):
    """Any extra keyword argument whose value has a modify_request
    method gets a chance to mutate the outgoing request."""
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())

    class RequestModifier(object):
        def modify_request(self, http_request):
            http_request.headers['Special'] = 'Set'

    response = client.get('http://example.com/modified',
                          extra=RequestModifier())
    # assertEqual replaces the deprecated assert_(a == b) idiom.
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/modified')
    self.assertEqual(response.getheader('Echo-Method'), 'GET')
    self.assertEqual(response.getheader('Special'), 'Set')
def test_post(self):
    """POST sends the body installed by the data object's
    modify_request hook, regardless of which kwarg carries it."""
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())

    class TestData(object):
        def modify_request(self, http_request):
            http_request.add_body_part('test body', 'text/testdata')

    response = client.Post(uri='http://example.com/', data=TestData())
    # assertEqual replaces the deprecated assert_(a == b) idiom.
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Method'), 'POST')
    self.assertEqual(response.getheader('Content-Length'), str(len('test body')))
    self.assertEqual(response.getheader('Content-Type'), 'text/testdata')
    # The echoed body can be read back incrementally.
    self.assertEqual(response.read(2), 'te')
    self.assertEqual(response.read(), 'st body')
    response = client.post(data=TestData(), uri='http://example.com/')
    self.assertEqual(response.read(), 'test body')
    self.assertEqual(response.getheader('Content-Type'), 'text/testdata')
    # Don't pass in a body, but use an extra kwarg to add the body to the
    # http_request.
    response = client.post(x=TestData(), uri='http://example.com/')
    self.assertEqual(response.read(), 'test body')
def test_put(self):
    """PUT sends the XML body provided through modify_request."""
    body_text = '<put>test</put>'
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient())

    class TestData(object):
        def modify_request(self, http_request):
            http_request.add_body_part(body_text, 'application/xml')

    response = client.put('http://example.org', TestData())
    # assertEqual replaces the deprecated assert_(a == b) idiom.
    self.assertEqual(response.getheader('Echo-Host'), 'example.org:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/')
    self.assertEqual(response.getheader('Echo-Method'), 'PUT')
    self.assertEqual(response.getheader('Content-Length'), str(len(body_text)))
    self.assertEqual(response.getheader('Content-Type'), 'application/xml')
    response = client.put(uri='http://example.org', data=TestData())
    self.assertEqual(response.getheader('Content-Length'), str(len(body_text)))
    self.assertEqual(response.getheader('Content-Type'), 'application/xml')
def test_delete(self):
    # A client-level 'source' string must end up prepended to the
    # default gdata user agent (checked at the end of this test).
    client = atom.client.AtomPubClient(atom.mock_http_core.EchoHttpClient(),
                                       source='my new app')
    # Both the CamelCase alias and the lowercase method issue DELETEs.
    response = client.Delete('http://example.com/simple')
    self.assertEqual(response.getheader('Echo-Host'), 'example.com:None')
    self.assertEqual(response.getheader('Echo-Uri'), '/simple')
    self.assertEqual(response.getheader('Echo-Method'), 'DELETE')
    response = client.delete(uri='http://example.com/d')
    self.assertEqual(response.getheader('Echo-Uri'), '/d')
    self.assertEqual(response.getheader('Echo-Method'), 'DELETE')
    self.assertTrue(
        response.getheader('User-Agent').startswith('my new app gdata-py/'))
def suite():
    """Build the suite containing every 'test*' method of
    AtomPubClientEchoTest."""
    echo_tests = unittest.makeSuite(AtomPubClientEchoTest, 'test')
    return unittest.TestSuite((echo_tests,))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
Lujeni/ansible | test/units/modules/network/check_point/test_cp_mgmt_dns_domain_facts.py | 19 | 2849 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_dns_domain_facts
# Canned API response for the plural "show dns-domains" call: one page
# (from/to) of the 6 total objects, returned at 'uid' detail level.
OBJECT = {
    "from": 1,
    "to": 1,
    "total": 6,
    "objects": [
        "53de74b7-8f19-4cbe-99fc-a81ef0759bad"
    ]
}
# Module arguments for the plural (list) form of the facts module.
SHOW_PLURAL_PAYLOAD = {
    'limit': 1,
    'details_level': 'uid'
}
# Module arguments for the singular form, targeting a missing object.
SHOW_SINGLE_PAYLOAD = {
    'name': 'object_which_is_not_exist'
}
api_call_object = 'dns-domain'
api_call_object_plural_version = 'dns-domains'
# Error body the mocked connection returns for the missing object.
failure_msg = '''{u'message': u'Requested object [object_which_is_not_exist] not found', u'code': u'generic_err_object_not_found'}'''
class TestCheckpointDnsDomainFacts(object):
    """Unit tests for cp_mgmt_dns_domain_facts, driven by a mocked
    Check Point management connection."""

    module = cp_mgmt_dns_domain_facts

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Replace AnsibleModule's exit/fail handlers with test doubles
        # that raise instead of calling sys.exit().
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        # Patch the Connection class used by the checkpoint module_utils
        # and hand the mocked instance to the test.
        connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return connection_class_mock.return_value

    def test_show_single_object_which_is_not_exist(self, mocker, connection_mock):
        # A 404 from the API must surface as a module failure carrying
        # the server's error message.
        connection_mock.send_request.return_value = (404, failure_msg)
        try:
            result = self._run_module(SHOW_SINGLE_PAYLOAD)
        except Exception as e:
            result = e.args[0]
        assert result['failed']
        assert 'Checkpoint device returned error 404 with message ' + failure_msg == result['msg']

    def test_show_few_objects(self, mocker, connection_mock):
        # A successful plural lookup is reported unchanged under the
        # plural fact name ('dns-domains').
        connection_mock.send_request.return_value = (200, OBJECT)
        result = self._run_module(SHOW_PLURAL_PAYLOAD)
        assert not result['changed']
        assert OBJECT == result['ansible_facts'][api_call_object_plural_version]

    def _run_module(self, module_args):
        # Helper: run the module's main() and return the JSON payload it
        # exited with.
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| gpl-3.0 |
vbshah1992/microblog | flask/lib/python2.7/site-packages/babel/messages/catalog.py | 86 | 32207 | # -*- coding: utf-8 -*-
"""
babel.messages.catalog
~~~~~~~~~~~~~~~~~~~~~~
Data structures for message catalogs.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import re
import time
from cgi import parse_header
from datetime import datetime, time as time_
from difflib import get_close_matches
from email import message_from_string
from copy import copy
from babel import __version__ as VERSION
from babel.core import Locale
from babel.dates import format_datetime
from babel.messages.plurals import get_plural
from babel.util import odict, distinct, LOCALTZ, FixedOffsetTimezone
from babel._compat import string_types, number_types, PY2, cmp
__all__ = ['Message', 'Catalog', 'TranslationError']
# Verbose-mode regex matching Python %-style format placeholders: an
# optional ``(name)`` mapping key, the optional flags / width /
# precision / length-modifier part, and a final conversion type
# character ('%%' matches a literal escaped percent).
PYTHON_FORMAT = re.compile(r'''(?x)
\%
(?:\(([\w]*)\))?
(
[-#0\ +]?(?:\*|[\d]+)?
(?:\.(?:\*|[\d]+))?
[hlL]?
)
([diouxXeEfFgGcrs%])
''')
class Message(object):
"""Representation of a single message in a catalog."""
def __init__(self, id, string=u'', locations=(), flags=(), auto_comments=(),
             user_comments=(), previous_id=(), lineno=None, context=None):
    """Create the message object.

    :param id: the message ID, or a ``(singular, plural)`` tuple for
               pluralizable messages
    :param string: the translated message string, or a
                   ``(singular, plural)`` tuple for pluralizable messages
    :param locations: a sequence of ``(filenname, lineno)`` tuples
    :param flags: a set or sequence of flags
    :param auto_comments: a sequence of automatic comments for the message
    :param user_comments: a sequence of user comments for the message
    :param previous_id: the previous message ID, or a ``(singular, plural)``
                        tuple for pluralizable messages
    :param lineno: the line number on which the msgid line was found in the
                   PO file, if any
    :param context: the message context
    """
    self.id = id #: The message ID
    # A pluralizable message with no translation still needs one empty
    # string per form.
    if not string and self.pluralizable:
        string = (u'', u'')
    self.string = string #: The message translation
    # distinct() drops duplicates while preserving order.
    self.locations = list(distinct(locations))
    self.flags = set(flags)
    # Keep the 'python-format' flag in sync with what the msgid
    # actually contains.
    if id and self.python_format:
        self.flags.add('python-format')
    else:
        self.flags.discard('python-format')
    self.auto_comments = list(distinct(auto_comments))
    self.user_comments = list(distinct(user_comments))
    # Normalize previous_id to a list, whether it was a single string
    # or a (singular, plural) sequence.
    if isinstance(previous_id, string_types):
        self.previous_id = [previous_id]
    else:
        self.previous_id = list(previous_id)
    self.lineno = lineno
    self.context = context
def __repr__(self):
return '<%s %r (flags: %r)>' % (type(self).__name__, self.id,
list(self.flags))
def __cmp__(self, obj):
    """Compare Messages, taking into account plural ids"""
    def values_to_compare():
        # When either side is pluralizable, compare on the singular
        # form (id[0]) so singular and plural messages order
        # consistently against each other.
        if isinstance(obj, Message):
            plural = self.pluralizable
            obj_plural = obj.pluralizable
            if plural and obj_plural:
                return self.id[0], obj.id[0]
            elif plural:
                return self.id[0], obj.id
            elif obj_plural:
                return self.id, obj.id[0]
        return self.id, obj.id
    this, other = values_to_compare()
    return cmp(this, other)

# The rich comparison operators all delegate to __cmp__ so the
# plural-aware ordering above also applies on Python 3, where the
# interpreter never calls __cmp__ itself.
def __gt__(self, other):
    return self.__cmp__(other) > 0

def __lt__(self, other):
    return self.__cmp__(other) < 0

def __ge__(self, other):
    return self.__cmp__(other) >= 0

def __le__(self, other):
    return self.__cmp__(other) <= 0

def __eq__(self, other):
    return self.__cmp__(other) == 0

def __ne__(self, other):
    return self.__cmp__(other) != 0
def clone(self):
    # Copy every constructor argument so that mutating the clone's
    # lists/sets does not affect this message.
    return Message(*map(copy, (self.id, self.string, self.locations,
                               self.flags, self.auto_comments,
                               self.user_comments, self.previous_id,
                               self.lineno, self.context)))
def check(self, catalog=None):
    """Run various validation checks on the message.  Some validations
    are only performed if the catalog is provided.  This method returns
    a sequence of `TranslationError` objects.

    :rtype: ``iterator``
    :param catalog: A catalog instance that is passed to the checkers
    :see: `Catalog.check` for a way to perform checks for all messages
          in a catalog.
    """
    # Imported lazily to avoid a circular import with the checkers
    # module.
    from babel.messages.checkers import checkers
    errors = []
    # Collect (rather than raise) errors so callers can report every
    # problem with a message at once.
    for checker in checkers:
        try:
            checker(catalog, self)
        except TranslationError as e:
            errors.append(e)
    return errors
@property
def fuzzy(self):
"""Whether the translation is fuzzy.
>>> Message('foo').fuzzy
False
>>> msg = Message('foo', 'foo', flags=['fuzzy'])
>>> msg.fuzzy
True
>>> msg
<Message 'foo' (flags: ['fuzzy'])>
:type: `bool`"""
return 'fuzzy' in self.flags
@property
def pluralizable(self):
"""Whether the message is plurizable.
>>> Message('foo').pluralizable
False
>>> Message(('foo', 'bar')).pluralizable
True
:type: `bool`"""
return isinstance(self.id, (list, tuple))
@property
def python_format(self):
    """True when any of the message ids contains Python %-style format
    placeholders.

    >>> Message('foo %(name)s bar').python_format
    True
    >>> Message(('foo %(name)s', 'foo %(name)s')).python_format
    True

    :type: `bool`"""
    if isinstance(self.id, (list, tuple)):
        candidates = self.id
    else:
        candidates = [self.id]
    for candidate in candidates:
        if PYTHON_FORMAT.search(candidate):
            return True
    return False
# Raised by the checker callables in babel.messages.checkers; collected
# (not propagated) by Message.check().
class TranslationError(Exception):
    """Exception thrown by translation checkers when invalid message
    translations are encountered."""
# Default POT header comment; the upper-case placeholders are replaced
# by Catalog._get_header_comment().
DEFAULT_HEADER = u"""\
# Translations template for PROJECT.
# Copyright (C) YEAR ORGANIZATION
# This file is distributed under the same license as the PROJECT project.
# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
#"""

if PY2:
    def _parse_header(header_string):
        # message_from_string only works for str, not for unicode
        headers = message_from_string(header_string.encode('utf8'))
        decoded_headers = {}
        for name, value in headers.items():
            name = name.decode('utf8')
            value = value.decode('utf8')
            decoded_headers[name] = value
        return decoded_headers
else:
    # On Python 3, email.message_from_string accepts str directly.
    _parse_header = message_from_string
class Catalog(object):
"""Representation of a message catalog."""
def __init__(self, locale=None, domain=None, header_comment=DEFAULT_HEADER,
project=None, version=None, copyright_holder=None,
msgid_bugs_address=None, creation_date=None,
revision_date=None, last_translator=None, language_team=None,
charset=None, fuzzy=True):
"""Initialize the catalog object.
:param locale: the locale identifier or `Locale` object, or `None`
if the catalog is not bound to a locale (which basically
means it's a template)
:param domain: the message domain
:param header_comment: the header comment as string, or `None` for the
default header
:param project: the project's name
:param version: the project's version
:param copyright_holder: the copyright holder of the catalog
:param msgid_bugs_address: the email address or URL to submit bug
reports to
:param creation_date: the date the catalog was created
:param revision_date: the date the catalog was revised
:param last_translator: the name and email of the last translator
:param language_team: the name and email of the language team
:param charset: the encoding to use in the output (defaults to utf-8)
:param fuzzy: the fuzzy bit on the catalog header
"""
self.domain = domain #: The message domain
if locale:
locale = Locale.parse(locale)
self.locale = locale #: The locale or `None`
self._header_comment = header_comment
self._messages = odict()
self.project = project or 'PROJECT' #: The project name
self.version = version or 'VERSION' #: The project version
self.copyright_holder = copyright_holder or 'ORGANIZATION'
self.msgid_bugs_address = msgid_bugs_address or 'EMAIL@ADDRESS'
self.last_translator = last_translator or 'FULL NAME <EMAIL@ADDRESS>'
"""Name and email address of the last translator."""
self.language_team = language_team or 'LANGUAGE <LL@li.org>'
"""Name and email address of the language team."""
self.charset = charset or 'utf-8'
if creation_date is None:
creation_date = datetime.now(LOCALTZ)
elif isinstance(creation_date, datetime) and not creation_date.tzinfo:
creation_date = creation_date.replace(tzinfo=LOCALTZ)
self.creation_date = creation_date #: Creation date of the template
if revision_date is None:
revision_date = 'YEAR-MO-DA HO:MI+ZONE'
elif isinstance(revision_date, datetime) and not revision_date.tzinfo:
revision_date = revision_date.replace(tzinfo=LOCALTZ)
self.revision_date = revision_date #: Last revision date of the catalog
self.fuzzy = fuzzy #: Catalog header fuzzy bit (`True` or `False`)
self.obsolete = odict() #: Dictionary of obsolete messages
self._num_plurals = None
self._plural_expr = None
def _get_header_comment(self):
comment = self._header_comment
year = datetime.now(LOCALTZ).strftime('%Y')
if hasattr(self.revision_date, 'strftime'):
year = self.revision_date.strftime('%Y')
comment = comment.replace('PROJECT', self.project) \
.replace('VERSION', self.version) \
.replace('YEAR', year) \
.replace('ORGANIZATION', self.copyright_holder)
if self.locale:
comment = comment.replace('Translations template', '%s translations'
% self.locale.english_name)
return comment
def _set_header_comment(self, string):
self._header_comment = string
header_comment = property(_get_header_comment, _set_header_comment, doc="""\
The header comment for the catalog.
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> print catalog.header_comment #doctest: +ELLIPSIS
# Translations template for Foobar.
# Copyright (C) ... Foo Company
# This file is distributed under the same license as the Foobar project.
# FIRST AUTHOR <EMAIL@ADDRESS>, ....
#
The header can also be set from a string. Any known upper-case variables
will be replaced when the header is retrieved again:
>>> catalog = Catalog(project='Foobar', version='1.0',
... copyright_holder='Foo Company')
>>> catalog.header_comment = '''\\
... # The POT for my really cool PROJECT project.
... # Copyright (C) 1990-2003 ORGANIZATION
... # This file is distributed under the same license as the PROJECT
... # project.
... #'''
>>> print catalog.header_comment
# The POT for my really cool Foobar project.
# Copyright (C) 1990-2003 Foo Company
# This file is distributed under the same license as the Foobar
# project.
#
:type: `unicode`
""")
def _get_mime_headers(self):
headers = []
headers.append(('Project-Id-Version',
'%s %s' % (self.project, self.version)))
headers.append(('Report-Msgid-Bugs-To', self.msgid_bugs_address))
headers.append(('POT-Creation-Date',
format_datetime(self.creation_date, 'yyyy-MM-dd HH:mmZ',
locale='en')))
if isinstance(self.revision_date, (datetime, time_) + number_types):
headers.append(('PO-Revision-Date',
format_datetime(self.revision_date,
'yyyy-MM-dd HH:mmZ', locale='en')))
else:
headers.append(('PO-Revision-Date', self.revision_date))
headers.append(('Last-Translator', self.last_translator))
if (self.locale is not None) and ('LANGUAGE' in self.language_team):
headers.append(('Language-Team',
self.language_team.replace('LANGUAGE',
str(self.locale))))
else:
headers.append(('Language-Team', self.language_team))
if self.locale is not None:
headers.append(('Plural-Forms', self.plural_forms))
headers.append(('MIME-Version', '1.0'))
headers.append(('Content-Type',
'text/plain; charset=%s' % self.charset))
headers.append(('Content-Transfer-Encoding', '8bit'))
headers.append(('Generated-By', 'Babel %s\n' % VERSION))
return headers
def _set_mime_headers(self, headers):
    """Update catalog metadata from ``(name, value)`` MIME header tuples
    (the parsed ``msgid ""`` entry of a PO file).

    Unknown header names are ignored.  The two date headers are parsed
    into timezone-aware datetimes by a single shared helper (the
    original code duplicated the parsing logic inline for each header).
    """
    def _parse_header_datetime(value):
        # Parse 'YYYY-MM-DD HH:MM+HHMM' into an aware datetime.
        # FIXME: this should use dates.parse_datetime as soon as that
        # is ready.
        value, tzoffset, _ = re.split(r'([+-]\d{4})$', value, 1)
        tt = time.strptime(value, '%Y-%m-%d %H:%M')
        ts = time.mktime(tt)
        # Separate the offset into a sign component, hours, and
        # minutes, then combine into a net offset in minutes.
        plus_minus_s, rest = tzoffset[0], tzoffset[1:]
        plus_minus = int(plus_minus_s + '1')
        hours_offset = int(rest[:2])
        mins_offset = int(rest[2:])
        net_mins_offset = plus_minus * (hours_offset * 60 + mins_offset)
        tzinfo = FixedOffsetTimezone(net_mins_offset)
        return datetime.fromtimestamp(ts).replace(tzinfo=tzinfo)

    for name, value in headers:
        name = name.lower()
        if name == 'project-id-version':
            # Everything up to the last space is the project name.
            parts = value.split(' ')
            self.project = u' '.join(parts[:-1])
            self.version = parts[-1]
        elif name == 'report-msgid-bugs-to':
            self.msgid_bugs_address = value
        elif name == 'last-translator':
            self.last_translator = value
        elif name == 'language-team':
            self.language_team = value
        elif name == 'content-type':
            mimetype, params = parse_header(value)
            if 'charset' in params:
                self.charset = params['charset'].lower()
        elif name == 'plural-forms':
            # Prepend ' ;' so parse_header treats the whole value as
            # parameters.
            _, params = parse_header(' ;' + value)
            self._num_plurals = int(params.get('nplurals', 2))
            self._plural_expr = params.get('plural', '(n != 1)')
        elif name == 'pot-creation-date':
            self.creation_date = _parse_header_datetime(value)
        elif name == 'po-revision-date':
            # Keep the 'YEAR-MO-DA HO:MI+ZONE' placeholder default.
            if 'YEAR' not in value:
                self.revision_date = _parse_header_datetime(value)
mime_headers = property(_get_mime_headers, _set_mime_headers, doc="""\
The MIME headers of the catalog, used for the special ``msgid ""`` entry.
The behavior of this property changes slightly depending on whether a locale
is set or not, the latter indicating that the catalog is actually a template
for actual translations.
Here's an example of the output for such a catalog template:
>>> from babel.dates import UTC
>>> created = datetime(1990, 4, 1, 15, 30, tzinfo=UTC)
>>> catalog = Catalog(project='Foobar', version='1.0',
... creation_date=created)
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE
Last-Translator: FULL NAME <EMAIL@ADDRESS>
Language-Team: LANGUAGE <LL@li.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
And here's an example of the output when the locale is set:
>>> revised = datetime(1990, 8, 3, 12, 0, tzinfo=UTC)
>>> catalog = Catalog(locale='de_DE', project='Foobar', version='1.0',
... creation_date=created, revision_date=revised,
... last_translator='John Doe <jd@example.com>',
... language_team='de_DE <de@example.com>')
>>> for name, value in catalog.mime_headers:
... print '%s: %s' % (name, value)
Project-Id-Version: Foobar 1.0
Report-Msgid-Bugs-To: EMAIL@ADDRESS
POT-Creation-Date: 1990-04-01 15:30+0000
PO-Revision-Date: 1990-08-03 12:00+0000
Last-Translator: John Doe <jd@example.com>
Language-Team: de_DE <de@example.com>
Plural-Forms: nplurals=2; plural=(n != 1)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Content-Transfer-Encoding: 8bit
Generated-By: Babel ...
:type: `list`
""")
@property
def num_plurals(self):
    """The number of plural forms used by the catalog or locale;
    defaults to 2 when no locale is set.  Computed once and cached.

    :type: `int`"""
    if self._num_plurals is None:
        if self.locale:
            self._num_plurals = get_plural(self.locale)[0]
        else:
            self._num_plurals = 2
    return self._num_plurals
@property
def plural_expr(self):
    """The plural selection expression used by the catalog or locale;
    defaults to '(n != 1)' when no locale is set.  Computed once and
    cached.

    :type: `string_types`"""
    if self._plural_expr is None:
        if self.locale:
            self._plural_expr = get_plural(self.locale)[1]
        else:
            self._plural_expr = '(n != 1)'
    return self._plural_expr
@property
def plural_forms(self):
    """The ``Plural-Forms`` header value for the catalog's locale,
    combining :attr:`num_plurals` and :attr:`plural_expr`.

    :type: `str`"""
    return 'nplurals={0}; plural={1}'.format(self.num_plurals,
                                             self.plural_expr)
def __contains__(self, id):
"""Return whether the catalog has a message with the specified ID."""
return self._key_for(id) in self._messages
def __len__(self):
"""The number of messages in the catalog.
This does not include the special ``msgid ""`` entry."""
return len(self._messages)
def __iter__(self):
"""Iterates through all the entries in the catalog, in the order they
were added, yielding a `Message` object for every entry.
:rtype: ``iterator``"""
buf = []
for name, value in self.mime_headers:
buf.append('%s: %s' % (name, value))
flags = set()
if self.fuzzy:
flags |= set(['fuzzy'])
yield Message(u'', '\n'.join(buf), flags=flags)
for key in self._messages:
yield self._messages[key]
def __repr__(self):
locale = ''
if self.locale:
locale = ' %s' % self.locale
return '<%s %r%s>' % (type(self).__name__, self.domain, locale)
def __delitem__(self, id):
"""Delete the message with the specified ID."""
self.delete(id)
def __getitem__(self, id):
"""Return the message with the specified ID.
:param id: the message ID
"""
return self.get(id)
def __setitem__(self, id, message):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo')
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
If a message with that ID is already in the catalog, it is updated
to include the locations and flags of the new message.
>>> catalog = Catalog()
>>> catalog[u'foo'] = Message(u'foo', locations=[('main.py', 1)])
>>> catalog[u'foo'].locations
[('main.py', 1)]
>>> catalog[u'foo'] = Message(u'foo', locations=[('utils.py', 5)])
>>> catalog[u'foo'].locations
[('main.py', 1), ('utils.py', 5)]
:param id: the message ID
:param message: the `Message` object
"""
assert isinstance(message, Message), 'expected a Message object'
key = self._key_for(id, message.context)
current = self._messages.get(key)
if current:
if message.pluralizable and not current.pluralizable:
# The new message adds pluralization
current.id = message.id
current.string = message.string
current.locations = list(distinct(current.locations +
message.locations))
current.auto_comments = list(distinct(current.auto_comments +
message.auto_comments))
current.user_comments = list(distinct(current.user_comments +
message.user_comments))
current.flags |= message.flags
message = current
elif id == '':
# special treatment for the header message
self.mime_headers = _parse_header(message.string).items()
self.header_comment = '\n'.join([('# %s' % c).rstrip() for c
in message.user_comments])
self.fuzzy = message.fuzzy
else:
if isinstance(id, (list, tuple)):
assert isinstance(message.string, (list, tuple)), \
'Expected sequence but got %s' % type(message.string)
self._messages[key] = message
def add(self, id, string=None, locations=(), flags=(), auto_comments=(),
user_comments=(), previous_id=(), lineno=None, context=None):
"""Add or update the message with the specified ID.
>>> catalog = Catalog()
>>> catalog.add(u'foo')
<Message ...>
>>> catalog[u'foo']
<Message u'foo' (flags: [])>
This method simply constructs a `Message` object with the given
arguments and invokes `__setitem__` with that object.
:param id: the message ID, or a ``(singular, plural)`` tuple for
pluralizable messages
:param string: the translated message string, or a
``(singular, plural)`` tuple for pluralizable messages
:param locations: a sequence of ``(filenname, lineno)`` tuples
:param flags: a set or sequence of flags
:param auto_comments: a sequence of automatic comments
:param user_comments: a sequence of user comments
:param previous_id: the previous message ID, or a ``(singular, plural)``
tuple for pluralizable messages
:param lineno: the line number on which the msgid line was found in the
PO file, if any
:param context: the message context
"""
message = Message(id, string, list(locations), flags, auto_comments,
user_comments, previous_id, lineno=lineno,
context=context)
self[id] = message
return message
def check(self):
"""Run various validation checks on the translations in the catalog.
For every message which fails validation, this method yield a
``(message, errors)`` tuple, where ``message`` is the `Message` object
and ``errors`` is a sequence of `TranslationError` objects.
:rtype: ``iterator``
"""
for message in self._messages.values():
errors = message.check(catalog=self)
if errors:
yield message, errors
def get(self, id, context=None):
    """Return the message with the specified ID and context, or ``None``
    when the catalog has no such message.

    :param id: the message ID
    :param context: the message context, or ``None`` for no context
    """
    key = self._key_for(id, context)
    return self._messages.get(key)
def delete(self, id, context=None):
    """Delete the message with the specified ID and context; a missing
    message is silently ignored.

    :param id: the message ID
    :param context: the message context, or ``None`` for no context
    """
    try:
        del self._messages[self._key_for(id, context)]
    except KeyError:
        pass
def update(self, template, no_fuzzy_matching=False):
"""Update the catalog based on the given template catalog.
>>> from babel.messages import Catalog
>>> template = Catalog()
>>> template.add('green', locations=[('main.py', 99)])
<Message ...>
>>> template.add('blue', locations=[('main.py', 100)])
<Message ...>
>>> template.add(('salad', 'salads'), locations=[('util.py', 42)])
<Message ...>
>>> catalog = Catalog(locale='de_DE')
>>> catalog.add('blue', u'blau', locations=[('main.py', 98)])
<Message ...>
>>> catalog.add('head', u'Kopf', locations=[('util.py', 33)])
<Message ...>
>>> catalog.add(('salad', 'salads'), (u'Salat', u'Salate'),
... locations=[('util.py', 38)])
<Message ...>
>>> catalog.update(template)
>>> len(catalog)
3
>>> msg1 = catalog['green']
>>> msg1.string
>>> msg1.locations
[('main.py', 99)]
>>> msg2 = catalog['blue']
>>> msg2.string
u'blau'
>>> msg2.locations
[('main.py', 100)]
>>> msg3 = catalog['salad']
>>> msg3.string
(u'Salat', u'Salate')
>>> msg3.locations
[('util.py', 42)]
Messages that are in the catalog but not in the template are removed
from the main collection, but can still be accessed via the `obsolete`
member:
>>> 'head' in catalog
False
>>> catalog.obsolete.values()
[<Message 'head' (flags: [])>]
:param template: the reference catalog, usually read from a POT file
:param no_fuzzy_matching: whether to use fuzzy matching of message IDs
"""
messages = self._messages
remaining = messages.copy()
self._messages = odict()
# Prepare for fuzzy matching
fuzzy_candidates = []
if not no_fuzzy_matching:
fuzzy_candidates = dict([
(self._key_for(msgid), messages[msgid].context)
for msgid in messages if msgid and messages[msgid].string
])
fuzzy_matches = set()
def _merge(message, oldkey, newkey):
message = message.clone()
fuzzy = False
if oldkey != newkey:
fuzzy = True
fuzzy_matches.add(oldkey)
oldmsg = messages.get(oldkey)
if isinstance(oldmsg.id, string_types):
message.previous_id = [oldmsg.id]
else:
message.previous_id = list(oldmsg.id)
else:
oldmsg = remaining.pop(oldkey, None)
message.string = oldmsg.string
if isinstance(message.id, (list, tuple)):
if not isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = tuple(
[message.string] + ([u''] * (len(message.id) - 1))
)
elif len(message.string) != self.num_plurals:
fuzzy = True
message.string = tuple(message.string[:len(oldmsg.string)])
elif isinstance(message.string, (list, tuple)):
fuzzy = True
message.string = message.string[0]
message.flags |= oldmsg.flags
if fuzzy:
message.flags |= set([u'fuzzy'])
self[message.id] = message
for message in template:
if message.id:
key = self._key_for(message.id, message.context)
if key in messages:
_merge(message, key, key)
else:
if no_fuzzy_matching is False:
# do some fuzzy matching with difflib
if isinstance(key, tuple):
matchkey = key[0] # just the msgid, no context
else:
matchkey = key
matches = get_close_matches(matchkey.lower().strip(),
fuzzy_candidates.keys(), 1)
if matches:
newkey = matches[0]
newctxt = fuzzy_candidates[newkey]
if newctxt is not None:
newkey = newkey, newctxt
_merge(message, newkey, key)
continue
self[message.id] = message
for msgid in remaining:
if no_fuzzy_matching or msgid not in fuzzy_matches:
self.obsolete[msgid] = remaining[msgid]
# Make updated catalog's POT-Creation-Date equal to the template
# used to update the catalog
self.creation_date = template.creation_date
def _key_for(self, id, context=None):
    """Return the lookup key for a message: the singular msgid (even
    for pluralizable messages), or a ``(msgid, msgctxt)`` tuple when a
    context is given.
    """
    if isinstance(id, (list, tuple)):
        key = id[0]
    else:
        key = id
    if context is not None:
        return (key, context)
    return key
| bsd-3-clause |
marco-lancini/Showcase | django/contrib/contenttypes/tests.py | 152 | 2951 | from django import db
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.contenttypes.views import shortcut
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpRequest
from django.test import TestCase
class ContentTypesTests(TestCase):
def setUp(self):
    # First, let's make sure we're dealing with a blank slate (and that
    # DEBUG is on so that queries get logged)
    self.old_DEBUG = settings.DEBUG
    self.old_Site_meta_installed = Site._meta.installed
    settings.DEBUG = True
    # Start each test with an empty ContentType cache and query log.
    ContentType.objects.clear_cache()
    db.reset_queries()
def tearDown(self):
    # Restore the settings mutated in setUp and drop any cached
    # ContentType instances so later tests see a clean state.
    settings.DEBUG = self.old_DEBUG
    Site._meta.installed = self.old_Site_meta_installed
    ContentType.objects.clear_cache()
def test_lookup_cache(self):
"""
Make sure that the content type cache (see ContentTypeManager)
works correctly. Lookups for a particular content type -- by model or
by ID -- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
# A second hit, though, won't hit the DB, nor will a lookup by ID
ct = ContentType.objects.get_for_model(ContentType)
self.assertEqual(1, len(db.connection.queries))
ContentType.objects.get_for_id(ct.id)
self.assertEqual(1, len(db.connection.queries))
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
ContentType.objects.get_for_model(ContentType)
len(db.connection.queries)
self.assertEqual(2, len(db.connection.queries))
def test_shortcut_view(self):
"""
Check that the shortcut view (used for the admin "view on site"
functionality) returns a complete URL regardless of whether the sites
framework is installed
"""
request = HttpRequest()
request.META = {
"SERVER_NAME": "Example.com",
"SERVER_PORT": "80",
}
from django.contrib.auth.models import User
user_ct = ContentType.objects.get_for_model(User)
obj = User.objects.create(username="john")
if Site._meta.installed:
current_site = Site.objects.get_current()
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://%s/users/john/" % current_site.domain,
response._headers.get("location")[1])
Site._meta.installed = False
response = shortcut(request, user_ct.id, obj.id)
self.assertEqual("http://Example.com/users/john/",
response._headers.get("location")[1])
| mit |
itkovian/sqlalchemy | lib/sqlalchemy/testing/pickleable.py | 81 | 2641 | # testing/pickleable.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Classes used in pickling tests, need to be at the module level for
unpickling.
"""
from . import fixtures
# Stand-in mapped classes used by the pickling tests; all behavior comes
# from fixtures.ComparableEntity (defined elsewhere — presumably value-based
# comparison, as the name suggests).
class User(fixtures.ComparableEntity):
    pass


class Order(fixtures.ComparableEntity):
    pass


class Dingaling(fixtures.ComparableEntity):
    pass


class EmailUser(User):
    pass


class Address(fixtures.ComparableEntity):
    pass


# TODO: these are kind of arbitrary....
class Child1(fixtures.ComparableEntity):
    pass


class Child2(fixtures.ComparableEntity):
    pass


class Parent(fixtures.ComparableEntity):
    pass
class Screen(object):
    """Holds a display object together with an optional parent screen."""

    def __init__(self, obj, parent=None):
        self.obj, self.parent = obj, parent
class Foo(object):
    """Value object: equality compares data, stuff and moredata fields.

    Hashing stays identity-based (object.__hash__) on purpose.
    """

    __hash__ = object.__hash__

    def __init__(self, moredata):
        self.data = 'im data'
        self.stuff = 'im stuff'
        self.moredata = moredata

    def __eq__(self, other):
        return (other.data, other.stuff, other.moredata) == \
            (self.data, self.stuff, self.moredata)
class Bar(object):
    """Point-like value object; equality requires the exact same class
    and matching coordinates.  Hashing stays identity-based."""

    __hash__ = object.__hash__

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __eq__(self, other):
        if other.__class__ is not self.__class__:
            return False
        return (self.x, self.y) == (other.x, other.y)

    def __str__(self):
        return "Bar(%d, %d)" % (self.x, self.y)
class OldSchool:
    """Legacy-style class with class-and-coordinate equality (no __hash__)."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __eq__(self, other):
        return (other.__class__ is self.__class__
                and (other.x, other.y) == (self.x, self.y))
class OldSchoolWithoutCompare:
    """Legacy-style class with only default identity comparison."""

    def __init__(self, x, y):
        self.x, self.y = x, y
class BarWithoutCompare(object):
    """Like Bar but with default identity comparison; only __str__ added."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def __str__(self):
        return "Bar(%d, %d)" % (self.x, self.y)
class NotComparable(object):
    """Hashable by identity, but opts out of value comparison: both
    __eq__ and __ne__ return NotImplemented, so Python falls back to
    identity semantics."""

    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        return NotImplemented

    # Same opt-out for inequality.
    __ne__ = __eq__
class BrokenComparable(object):
    """Hashable by identity, but any equality or inequality check raises
    NotImplementedError (used to exercise failure paths)."""

    def __init__(self, data):
        self.data = data

    def __hash__(self):
        return id(self)

    def __eq__(self, other):
        raise NotImplementedError

    # Inequality blows up the same way.
    __ne__ = __eq__
| mit |
alex-pirozhenko/sklearn-pmml | sklearn_pmml/convert/test/test_decisionTreeClassifierConverter.py | 2 | 5880 | import numpy as np
from sklearn_pmml.convert.test.jpmml_test import JPMMLClassificationTest, JPMMLRegressionTest, TARGET_NAME, TARGET
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn_pmml.convert import TransformationContext, pmml_row, ModelMode, Schema
from sklearn_pmml.convert.features import *
from sklearn_pmml.convert.tree import DecisionTreeConverter
from sklearn_pmml import pmml
from unittest import TestCase
class TestDecisionTreeClassifierConverter(TestCase):
    """Checks PMML serialization of a small DecisionTreeClassifier."""

    def setUp(self):
        # Fixed seed so the fitted tree (and thus the PMML) is reproducible.
        np.random.seed(1)
        self.est = DecisionTreeClassifier(max_depth=2)
        # Target is the logical OR of the two binary inputs.
        self.est.fit([
            [0, 0],
            [0, 1],
            [1, 0],
            [1, 1],
        ], [0, 1, 1, 1])
        self.ctx = TransformationContext({
            Schema.INPUT: [
                IntegerNumericFeature('x1'),
                StringCategoricalFeature('x2', ['zero', 'one'])
            ],
            Schema.MODEL: [
                IntegerNumericFeature('x1'),
                StringCategoricalFeature('x2', ['zero', 'one'])
            ],
            Schema.DERIVED: [],
            Schema.OUTPUT: [
                IntegerNumericFeature('output')
            ]
        })
        self.converter = DecisionTreeConverter(
            estimator=self.est,
            context=self.ctx,
            mode=ModelMode.CLASSIFICATION
        )

    def test_transform(self):
        # Structural sanity checks on the generated TreeModel element.
        p = self.converter.pmml()
        tm = p.TreeModel[0]
        assert tm.MiningSchema is not None, 'Missing mining schema'
        assert len(tm.MiningSchema.MiningField) == 2, 'Wrong number of mining fields'
        assert tm.Node is not None, 'Missing root node'
        assert tm.Node.recordCount == 4
        assert tm.Node.True_ is not None, 'Root condition should always be True'

    def test_transform_with_derived_field(self):
        # Train on three inputs where the third equals the AND of the first
        # two; that third input is then declared as a derived MapValues field.
        self.est = DecisionTreeClassifier(max_depth=2)
        self.est.fit([
            [0, 0, 0],
            [0, 1, 0],
            [1, 0, 0],
            [1, 1, 1],
        ], [0, 1, 1, 1])
        # PMML MapValues transformation backed by an inline lookup table.
        mapping = pmml.MapValues(dataType="double", outputColumn="output")
        mapping.append(pmml.FieldColumnPair(column="x1", field="x1"))
        mapping.append(pmml.FieldColumnPair(column="x2", field="x2"))
        it = pmml.InlineTable()
        mapping_df = pd.DataFrame([
            dict(x1=0, x2='zero', output=0),
            dict(x1=0, x2='one', output=0),
            dict(x1=1, x2='zero', output=0),
            dict(x1=1, x2='one', output=1),
        ])
        # One InlineTable row per lookup-table entry.
        for idx, line in mapping_df.iterrows():
            it.append(pmml_row(**dict(line)))
        mapping.append(it)
        # NOTE(review): set_index returns a new frame and the result is
        # discarded here — likely intended mapping_df = mapping_df.set_index(...).
        mapping_df.set_index(keys=['x1', 'x2'])
        # NOTE(review): DataFrame.ix is deprecated; this function appears to be
        # stored on the DerivedFeature without being invoked by this test —
        # verify before relying on it.
        mapping_f = np.vectorize(lambda x1, x2: mapping_df.ix[x1, x2].output.values[0])
        self.ctx = TransformationContext({
            Schema.INPUT: [
                IntegerNumericFeature('x1'),
                StringCategoricalFeature('x2', ['zero', 'one'])
            ],
            Schema.DERIVED: [
                DerivedFeature(
                    feature=RealNumericFeature(name='x3'),
                    transformation=mapping,
                    function=mapping_f
                )
            ],
            Schema.MODEL: [
                IntegerNumericFeature('x1'),
                StringCategoricalFeature('x2', ['zero', 'one']),
                RealNumericFeature(name='x3')
            ],
            Schema.OUTPUT: [
                IntegerCategoricalFeature('output', ['neg', 'pos'])
            ]
        })
        self.converter = DecisionTreeConverter(
            estimator=self.est,
            context=self.ctx,
            mode=ModelMode.CLASSIFICATION
        )
        # Smoke test: serialization must not raise.
        self.converter.pmml().toxml()
class TestDecisionTreeRegressorConverter(TestCase):
    """Checks PMML serialization of a small DecisionTreeRegressor."""

    def setUp(self):
        # Fixed seed for a reproducible fitted tree.
        np.random.seed(1)
        self.est = DecisionTreeRegressor(max_depth=2)
        # Target is the logical OR of the two binary inputs.
        self.est.fit([
            [0, 0],
            [0, 1],
            [1, 0],
            [1, 1],
        ], [0, 1, 1, 1])
        self.ctx = TransformationContext({
            Schema.INPUT: [
                IntegerNumericFeature('x1'),
                StringCategoricalFeature('x2', ['zero', 'one'])
            ],
            Schema.MODEL: [
                IntegerNumericFeature('x1'),
                StringCategoricalFeature('x2', ['zero', 'one'])
            ],
            Schema.DERIVED: [],
            Schema.OUTPUT: [
                IntegerNumericFeature('output')
            ]
        })
        self.converter = DecisionTreeConverter(
            estimator=self.est,
            context=self.ctx,
            mode=ModelMode.REGRESSION
        )

    def test_transform(self):
        # Structural sanity checks on the generated TreeModel element.
        p = self.converter.pmml()
        tm = p.TreeModel[0]
        assert tm.MiningSchema is not None, 'Missing mining schema'
        assert len(tm.MiningSchema.MiningField) == 2, 'Wrong number of mining fields'
        assert tm.Node is not None, 'Missing root node'
        assert tm.Node.recordCount == 4
        assert tm.Node.True_ is not None, 'Root condition should always be True'
class TestDecisionTreeClassificationJPMMLParity(TestCase, JPMMLClassificationTest):
    """Parity test: classifier PMML output is checked against JPMML.

    init_data() and self.ctx come from the JPMMLClassificationTest mixin
    (defined elsewhere).
    """

    def setUp(self):
        self.model = DecisionTreeClassifier(max_depth=2)
        self.init_data()
        self.converter = DecisionTreeConverter(
            estimator=self.model,
            context=self.ctx,
            mode=ModelMode.CLASSIFICATION
        )

    @property
    def output(self):
        # Target feature declaration; presumably consumed by the mixin's
        # checks — verify against JPMMLClassificationTest.
        return IntegerCategoricalFeature(name=TARGET_NAME, value_list=TARGET)
class TestDecisionTreeRegressionJPMMLParity(TestCase, JPMMLRegressionTest):
    """Parity test: regressor PMML output is checked against JPMML.

    init_data() and self.ctx come from the JPMMLRegressionTest mixin
    (defined elsewhere).
    """

    def setUp(self):
        self.model = DecisionTreeRegressor()
        self.init_data()
        self.converter = DecisionTreeConverter(
            estimator=self.model,
            context=self.ctx,
            mode=ModelMode.REGRESSION
        )
| mit |
leohmoraes/weblate | weblate/trans/management/commands/__init__.py | 10 | 6010 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <http://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Helper classes for management commands.
'''
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from optparse import make_option
from weblate.trans.models import Unit, SubProject, Translation
class WeblateCommand(BaseCommand):
    '''
    Command which accepts project/component/--all params to process.
    '''
    args = '<project/component>'
    # optparse-style options (pre-1.8 Django management command API).
    option_list = BaseCommand.option_list + (
        make_option(
            '--all',
            action='store_true',
            dest='all',
            default=False,
            help='process all components'
        ),
    )

    def get_units(self, *args, **options):
        '''
        Returns list of units matching parameters.
        '''
        if options['all']:
            return Unit.objects.all()
        return Unit.objects.filter(
            translation__subproject__in=self.get_subprojects(*args, **options)
        )

    def iterate_units(self, *args, **options):
        """
        Memory effective iteration over units.

        Yields units in primary-key order, fetched in chunks of 1000 so the
        whole queryset is never materialized at once.
        """
        units = self.get_units(*args, **options).order_by('pk')
        count = units.count()
        if not count:
            return

        current = 0
        last = units.order_by('-pk')[0].pk
        done = 0
        step = 1000

        # Iterate over chunks
        while current < last:
            self.stdout.write(
                'Processing {0:.1f}%'.format(done * 100.0 / count),
            )
            with transaction.atomic():
                # Keyset pagination: resume after the last pk seen instead of
                # using OFFSET, which degrades on large tables.
                step_units = units.filter(
                    pk__gt=current
                )[:step].prefetch_related(
                    'translation__language',
                    'translation__subproject',
                    'translation__subproject__project',
                )
                for unit in step_units:
                    current = unit.pk
                    done += 1
                    yield unit
        self.stdout.write('Operation completed')

    def get_translations(self, *args, **options):
        '''
        Returns list of translations matching parameters.
        '''
        return Translation.objects.filter(
            subproject__in=self.get_subprojects(*args, **options)
        )

    def get_subprojects(self, *args, **options):
        '''
        Returns list of components matching parameters.

        Raises CommandError when neither --all nor arguments are given, or
        when an argument matches no component.
        '''
        if options['all']:
            # all components
            result = SubProject.objects.all()
        elif len(args) == 0:
            # no arguments given to filter projects
            self.stderr.write(
                'Please specify either --all or <project/component>'
            )
            raise CommandError('Nothing to process!')
        else:
            # start with none and add found
            result = SubProject.objects.none()
            # process arguments
            for arg in args:
                # do we have also component?
                parts = arg.split('/')
                # filter by project
                found = SubProject.objects.filter(project__slug=parts[0])
                # filter by component if available
                if len(parts) == 2:
                    found = found.filter(slug=parts[1])
                # warn on no match
                if found.count() == 0:
                    self.stderr.write(
                        '"%s" did not match any components' % arg
                    )
                    raise CommandError('Nothing to process!')
                # merge results
                result |= found
        return result

    def handle(self, *args, **options):
        """
        The actual logic of the command. Subclasses must implement
        this method.
        """
        raise NotImplementedError()
class WeblateLangCommand(WeblateCommand):
    '''
    Command accepting additional language parameter to filter
    list of languages to process.
    '''
    option_list = WeblateCommand.option_list + (
        make_option(
            '--lang',
            action='store',
            type='string',
            dest='lang',
            default=None,
            help='Limit only to given languages (comma separated list)'
        ),
    )

    def get_units(self, *args, **options):
        '''
        Returns list of units matching parameters.
        '''
        if options['all']:
            if options['lang'] is not None:
                return Unit.objects.filter(
                    translation__language__code=options['lang']
                )
            return Unit.objects.all()
        # NOTE(review): without --all, the --lang option is ignored here
        # (only get_translations honours it) — confirm this is intended.
        return super(WeblateLangCommand, self).get_units(*args, **options)

    def get_translations(self, *args, **options):
        '''
        Returns list of translations matching parameters.

        --lang may be a comma separated list of language codes.
        '''
        result = super(WeblateLangCommand, self).get_translations(
            *args, **options
        )

        if options['lang'] is not None:
            langs = options['lang'].split(',')
            result = result.filter(language_code__in=langs)

        return result

    def handle(self, *args, **options):
        """
        The actual logic of the command. Subclasses must implement
        this method.
        """
        raise NotImplementedError()
| gpl-3.0 |
DecimalMan/dkp | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct

i = 0
# Read packed little-endian u32 values from stdin and print them as
# space-separated "index=value" pairs (the sysfs adsl_config format
# described in the header comment).
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: done.
        break
    elif len(buf) != 4:
        # Truncated trailing value: terminate the output line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    # Index is printed in hex, the value in decimal.
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
K3D-tools/K3D-jupyter | examples/SageDays74/MD_utils.py | 1 | 2749 | import numpy as np
from numba import double, jit
import k3d
@jit((double[:,:], double[:], double, double))
def lennardjones(U, box, sigma = 0.3405, epsilon=0.9959):
    """Lennard-Jones forces, potential energy and virial over all particle
    pairs in a periodic box.

    U     -- particle positions, shape (ndim, npart)
    box   -- box edge length per dimension (same units as U)
    sigma, epsilon -- LJ parameters; defaults look like argon in nm /
                      reduced units — confirm against the caller.

    Returns (Epot, F, Vir): total potential energy, forces with shape
    (ndim, npart), and the accumulated virial term.
    """
    ndim = len(U)
    npart = len(U[0])
    F = np.zeros((ndim, npart))
    Epot = 0.0
    Vir = 0.0
    # Common force prefactor: 48*epsilon/sigma^2.
    C = epsilon/sigma**2*48
    # Squared cutoff distance; pairs beyond it do not interact.
    rc2 = 6.25
    rc2i=1.0/rc2
    rc2i *= sigma**2
    rc6i=rc2i*rc2i*rc2i
    # Potential at the cutoff, subtracted so the energy is continuous there.
    ecut=rc6i*(rc6i-1.0)
    for i in range(npart):
        for j in range(npart):
            if i > j:
                # Each unique pair handled once; both particles get the force.
                X = U[0, j] - U[0, i]
                Y = U[1, j] - U[1, i]
                Z = U[2, j] - U[2, i]
                # Periodic boundary condition: minimum-image separation.
                X -= box[0] * np.rint(X/box[0])
                Y -= box[1] * np.rint(Y/box[1])
                Z -= box[2] * np.rint(Z/box[2])
                # Distance squared
                r2 = X*X + Y*Y + Z*Z
                if(r2 < rc2):
                    r2i = 1.0 / r2
                    r2i *= sigma**2 # use nm scale
                    r6i = r2i*r2i*r2i
                    Epot = Epot + r6i*(r6i-1.0) - ecut
                    ftmp = C * r6i*(r6i- 0.5)
                    Vir += ftmp
                    ftmp *= r2i
                    # Newton's third law: equal and opposite contributions.
                    F[0, i] -= ftmp * X
                    F[1, i] -= ftmp * Y
                    F[2, i] -= ftmp * Z
                    F[0, j] += ftmp * X
                    F[1, j] += ftmp * Y
                    F[2, j] += ftmp * Z
    # The 4*epsilon factor of the LJ potential is applied once at the end.
    Epot = Epot * 4.0 * epsilon
    return Epot, F, Vir
class simple_molecule_vis(object):
    """Minimal K3D scene: one point cloud plus a wireframe box outline."""

    @classmethod
    def box_coords(cls, bs = 2.2):
        # Flat vertex strip tracing the edges of a cube with side *bs*
        # centred on the origin; some edges are retraced so one polyline
        # covers the whole wireframe.
        a = bs/2.0
        box = np.array([-a,-a,-a] + [-a,a,-a] +[a,a,-a] + [a,-a,-a]+ [-a,-a,-a]+\
               [-a,-a,a] + [-a,a,a] +[a,a,a] + [a,-a,a]+ [-a,-a,a]+\
               [-a,a,a]+[-a,a,-a]+[-a,a,a]+\
               [a,a,a]+[a,a,-a]+[a,a,a]+\
               [a,-a,a]+[a,-a,-a]+[a,-a,a])
        return box

    def update_box(self, bs = 1):
        # NOTE(review): default bs here (1) differs from box_coords' own
        # default (2.2) — confirm which is intended.
        self.box.vertices = self.box_coords(bs=bs)

    def __init__(self,bs=1.0):
        self.new_plot(bs=bs)

    def new_plot(self,bs=1.0):
        # Build a fresh plot with a random point cloud and the box outline;
        # auto-fit is disabled so the camera/grid stay put during updates.
        points_number = 1
        positions = 50 * np.random.random_sample((points_number,3)) - 25
        colors = np.random.randint(0, 0x777777, points_number)
        self.plot = k3d.plot()
        self.pkts = k3d.points(positions, colors, point_size=.3)
        self.plot += self.pkts
        self.plot.camera_auto_fit = False
        self.plot.grid_auto_fit = False
        self.box = k3d.line(self.box_coords(bs=bs))
        self.plot += self.box

    def __repr__(self):
        # Displaying the plot is a deliberate side effect of repr
        # (notebook convenience).
        self.plot.display()
        return "K3D fast molecule viewer"
| mit |
rechie1995/me | python/face_detector.py | 10 | 3348 | #!/usr/bin/python
# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt
#
# This example program shows how to find frontal human faces in an image. In
# particular, it shows how you can take a list of images from the command
# line and display each on the screen with red boxes overlaid on each human
# face.
#
# The examples/faces folder contains some jpg images of people. You can run
# this program on them and see the detections by executing the
# following command:
# ./face_detector.py ../examples/faces/*.jpg
#
# This face detector is made using the now classic Histogram of Oriented
# Gradients (HOG) feature combined with a linear classifier, an image
# pyramid, and sliding window detection scheme. This type of object detector
# is fairly general and capable of detecting many types of semi-rigid objects
# in addition to human faces. Therefore, if you are interested in making
# your own object detectors then read the train_object_detector.py example
# program.
#
#
# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE
# You can install dlib using the command:
# pip install dlib
#
# Alternatively, if you want to compile dlib yourself then go into the dlib
# root folder and run:
# python setup.py install
# or
# python setup.py install --yes USE_AVX_INSTRUCTIONS
# if you have a CPU that supports AVX instructions, since this makes some
# things run faster.
#
# Compiling dlib should work on any operating system so long as you have
# CMake and boost-python installed. On Ubuntu, this can be done easily by
# running the command:
# sudo apt-get install libboost-python-dev cmake
#
# Also note that this example requires scikit-image which can be installed
# via the command:
# pip install scikit-image
# Or downloaded from http://scikit-image.org/download.html.
import sys
import dlib
from skimage import io
detector = dlib.get_frontal_face_detector()
win = dlib.image_window()

# Detect and display faces for every image path given on the command line.
for f in sys.argv[1:]:
    print("Processing file: {}".format(f))
    img = io.imread(f)
    # The 1 in the second argument indicates that we should upsample the image
    # 1 time. This will make everything bigger and allow us to detect more
    # faces.
    dets = detector(img, 1)
    print("Number of faces detected: {}".format(len(dets)))
    for i, d in enumerate(dets):
        print("Detection {}: Left: {} Top: {} Right: {} Bottom: {}".format(
            i, d.left(), d.top(), d.right(), d.bottom()))

    # Show the image with the detections overlaid and wait for Enter.
    win.clear_overlay()
    win.set_image(img)
    win.add_overlay(dets)
    dlib.hit_enter_to_continue()


# Finally, if you really want to you can ask the detector to tell you the score
# for each detection.  The score is bigger for more confident detections.
# The third argument to run is an optional adjustment to the detection threshold,
# where a negative value will return more detections and a positive value fewer.
# Also, the idx tells you which of the face sub-detectors matched.  This can be
# used to broadly identify faces in different orientations.
if (len(sys.argv[1:]) > 0):
    # Re-run on the first image only, with a lowered threshold (-1).
    img = io.imread(sys.argv[1])
    dets, scores, idx = detector.run(img, 1, -1)
    for i, d in enumerate(dets):
        print("Detection {}, score: {}, face_type:{}".format(
            d, scores[i], idx[i]))
| gpl-3.0 |
Nau3D/nau | contrib/glbinding-master/source/tests/googletest/googletest/test/googletest-break-on-failure-unittest.py | 122 | 7301 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking googletest-break-on-failure-unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
import os
import gtest_test_utils
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the googletest-break-on-failure-unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'googletest-break-on-failure-unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
    """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""

    process = gtest_test_utils.Subprocess(command, env=environ)
    return 1 if process.terminated_by_signal else 0
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
    """Tests using the GTEST_BREAK_ON_FAILURE environment variable or
    the --gtest_break_on_failure flag to turn assertion failures into
    segmentation faults.
    """

    def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
        """Runs googletest-break-on-failure-unittest_ and verifies that it does
        (or does not) have a seg-fault.

        Args:
          env_var_value:    value of the GTEST_BREAK_ON_FAILURE environment
                            variable; None if the variable should be unset.
          flag_value:       value of the --gtest_break_on_failure flag;
                            None if the flag should not be present.
          expect_seg_fault: 1 if the program is expected to generate a seg-fault;
                            0 otherwise.
        """

        SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)

        # Human-readable description of the env var state for the message.
        if env_var_value is None:
            env_var_value_msg = ' is not set'
        else:
            env_var_value_msg = '=' + env_var_value

        # Translate flag_value (None / '0' / anything else) into the flag text.
        if flag_value is None:
            flag = ''
        elif flag_value == '0':
            flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
        else:
            flag = '--%s' % BREAK_ON_FAILURE_FLAG

        command = [EXE_PATH]
        if flag:
            command.append(flag)

        if expect_seg_fault:
            should_or_not = 'should'
        else:
            should_or_not = 'should not'

        has_seg_fault = Run(command)

        # Reset the env var before asserting so a failure doesn't leak
        # state into later tests.
        SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)

        msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
               (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
                should_or_not))
        self.assert_(has_seg_fault == expect_seg_fault, msg)

    def testDefaultBehavior(self):
        """Tests the behavior of the default mode."""

        self.RunAndVerify(env_var_value=None,
                          flag_value=None,
                          expect_seg_fault=0)

    def testEnvVar(self):
        """Tests using the GTEST_BREAK_ON_FAILURE environment variable."""

        self.RunAndVerify(env_var_value='0',
                          flag_value=None,
                          expect_seg_fault=0)
        self.RunAndVerify(env_var_value='1',
                          flag_value=None,
                          expect_seg_fault=1)

    def testFlag(self):
        """Tests using the --gtest_break_on_failure flag."""

        self.RunAndVerify(env_var_value=None,
                          flag_value='0',
                          expect_seg_fault=0)
        self.RunAndVerify(env_var_value=None,
                          flag_value='1',
                          expect_seg_fault=1)

    def testFlagOverridesEnvVar(self):
        """Tests that the flag overrides the environment variable."""

        self.RunAndVerify(env_var_value='0',
                          flag_value='0',
                          expect_seg_fault=0)
        self.RunAndVerify(env_var_value='0',
                          flag_value='1',
                          expect_seg_fault=1)
        self.RunAndVerify(env_var_value='1',
                          flag_value='0',
                          expect_seg_fault=0)
        self.RunAndVerify(env_var_value='1',
                          flag_value='1',
                          expect_seg_fault=1)

    def testBreakOnFailureOverridesThrowOnFailure(self):
        """Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""

        SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
        try:
            self.RunAndVerify(env_var_value=None,
                              flag_value='1',
                              expect_seg_fault=1)
        finally:
            SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)

    # Only meaningful on Windows, where the catch-exceptions machinery exists.
    if IS_WINDOWS:
        def testCatchExceptionsDoesNotInterfere(self):
            """Tests that gtest_catch_exceptions doesn't interfere."""

            SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
            try:
                self.RunAndVerify(env_var_value='1',
                                  flag_value='1',
                                  expect_seg_fault=1)
            finally:
                SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| mit |
GauriGNaik/servo | tests/wpt/css-tests/css-text-decor-3_dev/xhtml1/support/generate-text-emphasis-style-property-tests.py | 841 | 3434 | #!/usr/bin/env python
# - * - coding: UTF-8 - * -
"""
This script generates tests text-emphasis-style-property-011 ~ 020 which
cover all possible values of text-emphasis-style property, except none
and <string>, with horizontal writing mode. It outputs a list of all
tests it generated in the format of Mozilla reftest.list to the stdout.
"""
from __future__ import unicode_literals
TEST_FILE = 'text-emphasis-style-property-{:03}{}.html'
TEST_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Test: text-emphasis-style: {title}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<link rel="help" href="https://drafts.csswg.org/css-text-decor-3/#text-emphasis-style-property">
<meta name="assert" content="'text-emphasis-style: {value}' produces {code} as emphasis marks.">
<link rel="match" href="text-emphasis-style-property-{index:03}-ref.html">
<p>Pass if there is a '{char}' above every character below:</p>
<div style="line-height: 5; text-emphasis-style: {value}">試験テスト</div>
'''
REF_FILE = 'text-emphasis-style-property-{:03}-ref.html'
REF_TEMPLATE = '''<!DOCTYPE html>
<meta charset="utf-8">
<title>CSS Reference: text-emphasis-style: {0}</title>
<link rel="author" title="Xidorn Quan" href="https://www.upsuper.org">
<link rel="author" title="Mozilla" href="https://www.mozilla.org">
<style> rt {{ font-variant-east-asian: inherit; }} </style>
<p>Pass if there is a '{1}' above every character below:</p>
<div style="line-height: 5;"><ruby>試<rt>{1}</rt>験<rt>{1}</rt>テ<rt>{1}</rt>ス<rt>{1}</rt>ト<rt>{1}</rt></ruby></div>
'''
DATA_SET = [
('dot', 0x2022, 0x25e6),
('circle', 0x25cf, 0x25cb),
('double-circle', 0x25c9, 0x25ce),
('triangle', 0x25b2, 0x25b3),
('sesame', 0xfe45, 0xfe46),
]
SUFFIXES = ['', 'a', 'b', 'c', 'd', 'e']
def get_html_entity(code):
    """Return the hexadecimal HTML character reference for *code*."""
    return '&#x%04X;' % code
def write_file(filename, content):
    """Write *content* to *filename*, encoded as UTF-8."""
    data = content.encode('UTF-8')
    with open(filename, 'wb') as out:
        out.write(data)
def write_test_file(idx, suffix, style, code, name=None):
    """Write one test HTML file and print its reftest manifest line.

    idx and suffix form the file name; *style* is the text-emphasis-style
    value under test, *code* the expected mark's code point, and *name* an
    optional human-readable title (defaults to the style value).
    Uses the module-level TEST_FILE/TEST_TEMPLATE/REF_FILE templates.
    """
    if not name:
        name = style
    filename = TEST_FILE.format(idx, suffix)
    write_file(filename, TEST_TEMPLATE.format(index=idx, value=style,
                                              char=get_html_entity(code),
                                              code='U+{:04X}'.format(code),
                                              title=name))
    # Mozilla reftest.list entry pairing the test with its reference page.
    print("== {} {}".format(filename, REF_FILE.format(idx)))
idx = 10
def write_files(style, code):
    """Generate the reference file plus all test variants for one
    (fill, shape) style pair, at the next sequential index.

    Mutates the module-level *idx* counter; *code* is the expected
    emphasis mark's code point.
    """
    global idx
    idx += 1
    fill, shape = style
    basic_style = "{} {}".format(fill, shape)
    write_file(REF_FILE.format(idx),
               REF_TEMPLATE.format(basic_style, get_html_entity(code)))
    suffix = iter(SUFFIXES)
    # Both keyword orders of the same value are tested.
    write_test_file(idx, next(suffix), basic_style, code)
    write_test_file(idx, next(suffix), "{} {}".format(shape, fill), code)
    # Shorthand variants: shape alone (only when the fill is 'filled') and
    # fill alone (only when the shape is 'circle').
    if fill == 'filled':
        write_test_file(idx, next(suffix), shape, code)
    if shape == 'circle':
        write_test_file(idx, next(suffix), fill, code, fill + ', horizontal')
print("# START tests from {}".format(__file__))
for name, code, _ in DATA_SET:
write_files(('filled', name), code)
for name, _, code in DATA_SET:
write_files(('open', name), code)
print("# END tests from {}".format(__file__))
| mpl-2.0 |
mikevb1/lagbot | cogs/smash/models/player.py | 2 | 2788 | from dataclasses import dataclass
from collections import deque
from .fighter import Fighter, FakeFighter
@dataclass
class Round:
    """One played round: the fighter used and whether the round was won."""

    fighter: Fighter
    win: bool = False

    def __str__(self):
        # A won round is rendered wrapped in double underscores.
        name = '{}'.format(self.fighter)
        return '__{}__'.format(name) if self.win else name
class Player:
    """State for one participant: rounds played, wins, and fighter bans."""

    def __init__(self, member, game):
        self.member = member
        self.game = game
        self.rounds = []      # Round objects, in play order
        self.bans = deque()   # fighters this player has banned
        self.end = False      # whether the player voted to end the game
        self.active = True

    @property
    def current_round(self):
        """Index of the most recent round (-1 when nothing played yet)."""
        return len(self.rounds) - 1

    @property
    def wins(self):
        """Total number of rounds won."""
        return sum(r.win for r in self.rounds)

    @property
    def latest_win_round(self):
        """Index of the most recently won round, or -1 if none."""
        for ind, round_ in enumerate(reversed(self.rounds), 1):
            if round_.win:
                return len(self.rounds) - ind
        return -1

    def has_played(self, fighter):
        """Return True if *fighter* was used in any round (fakes never count)."""
        if isinstance(fighter, FakeFighter):
            return False
        for round_ in self.rounds:
            if round_.fighter == fighter:
                return True
        return False

    def has_banned(self, fighter):
        """Return True if *fighter* is currently banned by this player."""
        return fighter in self.bans

    def ban(self, fighter):
        self.bans.append(fighter)

    def unban(self, fighter):
        self.bans.remove(fighter)

    def vote_to_end(self):
        """Toggle this player's vote to end the game."""
        self.end = not self.end

    def play(self, fighter, round_num=None):
        """Record *fighter* for a round.

        With *round_num*, pad any intervening rounds with placeholder
        fighters, then either replace a placeholder at that slot or insert
        a new round there; otherwise append a new round at the end.
        """
        if round_num is not None:
            round_diff = round_num - self.current_round
            if round_diff > 0:
                self.rounds.extend(Round(FakeFighter('-')) for _ in range(round_diff))
            if self.rounds[round_num].fighter.replace_on_insert:
                self.rounds[round_num].fighter = fighter
            else:
                self.rounds.insert(round_num, Round(fighter))
        else:
            self.rounds.append(Round(fighter))

    def win(self, round_num=None):
        """Mark a round as won.

        Returns False if the round does not exist or was already won.
        """
        if round_num is None:
            round_num = self.current_round
        try:
            round_ = self.rounds[round_num]
        except IndexError:
            return False
        if round_.win:
            return False
        round_.win = True
        return True

    def undo(self, remove_action=None, round_num=None):
        """Undo a play (remove the round) or a win (clear the win flag).

        When *remove_action* is None it is inferred: an unwon round is
        removed outright, a won round has its win flag cleared.  Returns
        False when there is nothing to undo at *round_num*.
        """
        if not self.rounds:
            return False
        if round_num is None:
            round_num = -1
        # Guard the lookup: an explicit out-of-range round_num previously
        # raised IndexError here instead of reporting failure.
        try:
            round_ = self.rounds[round_num]
        except IndexError:
            return False
        if remove_action is None and not round_.win:
            remove_action = 'play'
        if remove_action == 'play':
            self.rounds.pop(round_num)
        else:
            round_.win = False
        return True
| mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/elan/Pools/System Tabs/8_Messaging_Check_Pages_OLD.py | 2 | 8248 | from ImageScripter import *
from elan import *
# Smoke-test of the Configurator "Messaging" tab: visit every sub-page,
# exercise every right-click menu, then open and cancel every "add new"
# dialog.  Purely side-effecting GUI automation (image matching via elan).
ScriptSettings.Threshold = .96
#Messaging#################################################
Say("Checking the messaging page")
Configurator.messaging.Click()
#Configurator.messagingpage.SetThreshold(.98)
Configurator.messagingpage.Wait(seconds=10)
Say("Checking the messaging page looks great")
################################################
Configurator.globaloptions.Click()
#Configurator.messagingglobaloptionspage.SetThreshold(.98)
Configurator.messagingglobaloptionspage.Wait(seconds=10)
Configurator.communicationdevices.Click()
#Configurator.messagingcommunicationdevicespage.SetThreshold(.98)
Configurator.messagingcommunicationdevicespage.Wait(seconds=10)
Configurator.telephonesystems.Click()
#Configurator.telephonesystemspage.SetThreshold(.98)
Configurator.telephonesystemspage.Wait(seconds=10)
Configurator.intercomdevices.Click()
#Configurator.intercomdevicespage.SetThreshold(.98)
Configurator.intercomdevicespage.Wait(seconds=10)
Configurator.voicemailboxes.Click()
#Configurator.voicemailboxespage.SetThreshold(.98)
Configurator.voicemailboxespage.Wait(seconds=10)
Configurator.house.Click()
#Configurator.housepage.SetThreshold(.98)
Configurator.housepage.Wait(seconds=10)
Configurator.emailaccounts.Click()
#Configurator.emailaccountspage.SetThreshold(.98)
Configurator.emailaccountspage.Wait(seconds=10)
Configurator.emailmessagesoutbound.Click()
#Configurator.emailmessagesoutboundpage.SetThreshold(.98)
Configurator.emailmessagesoutboundpage.Wait(seconds=10)
Configurator.remotesystems.Click()
#Configurator.remotesystemspage.SetThreshold(.98)
Configurator.remotesystemspage.Wait(seconds=10)
Configurator.doorbell.Click()
#Configurator.doorbellpage.SetThreshold(.98)
Configurator.doorbellpage.Wait(seconds=10)
Configurator.pushmessages.Click()
#Configurator.pushmessagespage.SetThreshold(.98)
Configurator.pushmessagespage.Wait(seconds=10)
Configurator.calendars.Click()
#Configurator.calendarspage.SetThreshold(.98)
Configurator.calendarspage.Wait(seconds=10)
Configurator.calendargroups.Click()
#Configurator.calendargroupspage.SetThreshold(.98)
Configurator.calendargroupspage.Wait(seconds=10)
Configurator.custompages.Click()
#Configurator.messagingcustompagespage.SetThreshold(.98)
Configurator.messagingcustompagespage.Wait(seconds=10)
###Messaging Right Click
Configurator.communicationdevices.RightClick()
Configurator.communicationdevicesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.telephonesystems.RightClick()
Configurator.telephonesystemsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.voicemailboxes.RightClick()
Configurator.voicemailboxesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
# Bare excepts below deliberately swallow a failed RightClick and retry
# after re-navigating to the Messaging tab.
try:
    Configurator.house.RightClick()
except:
    Configurator.system.Click()
    Configurator.messaging.Click()
    Configurator.house.RightClick()
Configurator.houserightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
try:
    Configurator.emailaccounts.RightClick()
except:
    Configurator.system.Click()
    Configurator.messaging.Click()
    Configurator.emailaccounts.RightClick()
Configurator.emailaccountsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
try:
    Configurator.emailmessagesoutbound.RightClick()
except:
    Configurator.system.Click()
    Configurator.messaging.Click()
    sleep(5)
    Configurator.emailmessagesoutbound.RightClick()
Configurator.emailmessagesoutboundrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.remotesystem.RightClick()
Configurator.remotesystemrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.doorbell.RightClick()
Configurator.doorbellrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.pushmessages.RightClick()
Configurator.pushmessagesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.calendars.RightClick()
Configurator.calendarsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
###FIX
Configurator.system.RealClick()
Configurator.messaging.RealClick()
Configurator.calendargroups.RightClick()
Configurator.calendargroupsrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
Configurator.custompages.RightClick()
Configurator.custompagesrightclickmenu.Wait(seconds=10)
Configurator.grey.Click()
sleep(3)
Configurator.irrigation.Click()
Configurator.system.Click()
#######################################WINDOW##############################
############################Communication DEvice
#Configurator.system.Click()
Configurator.messaging.Click()
Configurator.communicationdevices.RightClick()
Configurator.addnewcommunicationdevice.RealClick()
Configurator.addnewcommunicationdevicewindow.Wait()
Add.PushButton.Click('Cancel')
############################Telephone System
Press('enter')
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.telephonesystems.RightClick()
Configurator.addnewdevice.RealClick()
Configurator.addnewdevicewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
####
############################Voice Mail Boxes
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.voicemailboxes.RightClick()
Configurator.addnewvoicemailbox.RealClick()
Configurator.addnewvoicemailboxwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################House#############
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.house.RightClick()
Configurator.addnewvoicemailbox.RealClick()
Configurator.addnewvoicemailboxwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Email Accounts###
Configurator.system.Click()
Configurator.messaging.Click()
# Fixed typo: was Configurator.emailsccounts (AttributeError at runtime);
# every other reference in this script uses emailaccounts.
Configurator.emailaccounts.RightClick()
Configurator.addnewemailaccount.RealClick()
Configurator.addnewemailaccountwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################email messages outbound
Configurator.system.Click()####################
Configurator.messaging.Click()
Configurator.emailmessagesoutbound.RightClick()
Configurator.addnewemailmessage.RealClick()
Configurator.addnewemailmessagewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Remote System####################
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.remotesystem.RightClick()
Configurator.addnewremotesystem.RealClick()
Configurator.addnewremotesystemwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
###########################
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.remotesystem.RightClick()
Configurator.addnewremoteintercomtarget.RealClick()
Configurator.addnewremoteintercomtargetwindow.Wait(threshold = .92)
Add.PushButton.Click('Cancel')
Press('enter')
############################Door Bell
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.doorbell.RightClick()
Configurator.addnewdevice.RealClick()
Configurator.addnewdevicewindowbell.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Push Messages
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.pushmessages.RightClick()
Configurator.addnewpushmessage.RealClick()
Configurator.addnewpushmessagewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Calendars
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.calendars.RightClick()
Configurator.addnewcalendaraccount.RealClick()
Configurator.addnewcalendaraccountwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Calendar Groups
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.calendargroups.RightClick()
Configurator.addnewcalendargroup.RealClick()
Configurator.addnewcalendargroupwindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
############################Custom PAge
Configurator.system.Click()
Configurator.messaging.Click()
Configurator.custompages.RightClick()
Configurator.addnewcustompage.RealClick()
Configurator.addnewcustompagewindow.Wait()
Add.PushButton.Click('Cancel')
Press('enter')
###Reset
Configurator.media.Click()
Configurator.system.Click() | gpl-3.0 |
OpenUpgrade/OpenUpgrade | openerp/addons/base/res/__init__.py | 384 | 1261 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import res_country
import res_lang
import res_partner
import res_bank
import res_config
import res_currency
import res_font
import res_company
import res_users
import res_request
# NOTE: a second, duplicate "import res_lang" used to appear here; it was a
# no-op (Python caches modules on first import) and has been removed.
import ir_property
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
chrisnorman7/helper | server.py | 1 | 1695 | """Run the helper as a server."""
import logging
from twisted.internet import reactor, protocol, task
from twisted.protocols.basic import LineReceiver
from helper.core import App
# Log INFO and above via the root logger.
logging.basicConfig(level='INFO')
# Single application instance shared by every connection.
app = App()
# Tracks the one active client protocol; at most one connection at a time.
app.protocol = None
def tick():
    """Drive the app until it stops, then shut down the reactor.

    Generator consumed by task.coiterate: each yield lets the reactor
    interleave other work between app ticks.
    """
    while True:
        if not app.running:
            break
        yield app.tick()
    reactor.stop()
class ServerProtocol(LineReceiver):
    """Forward every received line to the shared app instance."""

    def lineReceived(self, line):
        """Decode an incoming line and hand it to the app parser."""
        decoded = line.decode()
        logging.info('Parse("%s")', decoded)
        app.parse(decoded)

    def connectionMade(self):
        """Register as the active connection, displacing any previous one."""
        if app.protocol is not None:
            app.protocol.transport.loseConnection()
        app.protocol = self
        app.output_callback = self.handle_output

    def connectionLost(self, reason):
        """Deregister this connection from the app."""
        logging.info('Connection lost: %s.', reason.getErrorMessage())
        app.protocol = None
        app.output_callback = None

    def handle_output(self, line):
        """Send a line of app output back to the client, if any."""
        if line is None:
            return
        self.sendLine(line.encode())
class ServerFactory(protocol.ServerFactory):
    """Build a ServerProtocol for each incoming connection."""

    def buildProtocol(self, addr):
        """Log the peer address and hand back a fresh protocol instance."""
        logging.info('Incoming connection from %s:%d.', addr.host, addr.port)
        return ServerProtocol()
if __name__ == '__main__':
    logging.info('Server starting.')
    # Start the cooperative tick loop; coiterate returns a Deferred.
    tick_task = task.coiterate(tick())
    logging.info('Tick task started as %r.', tick_task)
    # Accept local-only connections on port 4357.
    reactor.listenTCP(4357, ServerFactory(), interface='127.0.0.1')
    reactor.run()
    # Reactor has stopped; let the app clean up.
    app.finish()
| mpl-2.0 |
rusenask/stubo-app | stubo/ext/tests/test_xmlexit.py | 5 | 15769 | import unittest
from stubo.ext.xmlutils import XPathValue
from stubo.testing import DummyModel
from stubo.model.request import StuboRequest
class TestXMLManglerPutStub(unittest.TestCase):
    """Tests for XMLManglerPutStub: mangling stub matchers at put/stub time."""
    def _make(self, **kwargs):
        # Late import so test collection does not require the exit module.
        from stubo.ext.xmlexit import XMLManglerPutStub
        return XMLManglerPutStub(**kwargs)
    def test_ctor(self):
        """The constructor stores the supplied mangler unchanged."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        self.assertEqual(putter.mangler, mangler)
    def test_matcher(self):
        """doMatcher masks the mangled element's value with '***'."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        response = putter.doMatcher()
        self.assertEqual(response.stub.contains_matchers(),
                         [u'<path><to><a>***</a></to></path>'])
    def test_matcher_strip_ns(self):
        """Namespaces on the stored matcher are stripped before mangling."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path xmlns="http://great.com"><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        result = putter.doMatcher()
        self.assertEqual(result.stub.contains_matchers(),
                         [u'<path><to><a>***</a></to></path>'])
    def test_matcher2(self):
        """A custom extractor transforms the element value instead of masking."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a',
                             extractor=lambda x: x[1:-1])))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        response = putter.doMatcher()
        self.assertEqual(response.stub.contains_matchers(),
                         [u'<path><to><a>y</a></to></path>'])
class TestPutStubMangleResponse(unittest.TestCase):
    """Tests for PutStubMangleResponse: mangling both matcher and response."""
    def _make(self, **kwargs):
        # Late import so test collection does not require the exit module.
        from stubo.ext.xmlexit import PutStubMangleResponse
        return PutStubMangleResponse(**kwargs)
    def test_ctor(self):
        """Both manglers handed to the constructor are stored."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        response_mangler = XMLMangler(elements=dict(a=XPathValue('/response', extractor=lambda x: x)),
                                      copy_attrs_on_match=True)
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(response_mangler=response_mangler,
                            mangler=mangler, request=StuboRequest(request),
                            context=context)
        self.assertEqual(putter.mangler, mangler)
        self.assertEqual(putter.response_mangler, response_mangler)
    def test_ctor_fails_if_xpath_has_no_extractor(self):
        """A response mangler XPath without an extractor is rejected."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        response_mangler = XMLMangler(elements=dict(a=XPathValue('/response')),
                                      copy_attrs_on_match=True)
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        with self.assertRaises(ValueError):
            self._make(response_mangler=response_mangler,
                       mangler=mangler, request=StuboRequest(request),
                       context=context)
    def test_response(self):
        """doResponse applies the response extractor to the stub payload."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        response_mangler = XMLMangler(elements=dict(a=XPathValue('/response', extractor=lambda x: x.upper())),
                                      copy_attrs_on_match=True)
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(response_mangler=response_mangler,
                            mangler=mangler, request=StuboRequest(request),
                            context=context)
        putter.doMatcher()
        response = putter.doResponse()
        self.assertEqual(response.stub.payload,
                         {'request': {'bodyPatterns': {'contains': [u'<path><to><a>***</a></to></path>']},
                                      'method': 'POST'},
                          'response': {'body': u'<response>ABC</response>', 'status': 200}})
    def test_response_namespace(self):
        """Response mangling works with a namespaced XPath expression."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        response_mangler = XMLMangler(elements=dict(a=XPathValue('//user:response', extractor=lambda x: x.upper())),
                                      copy_attrs_on_match=True,
                                      namespaces=dict(user="http://www.my.com/userschema"))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<x xmlns:user="http://www.my.com/userschema"><user:response>abc</user:response></x>'),
                    "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        putter = self._make(response_mangler=response_mangler,
                            mangler=mangler, request=StuboRequest(request),
                            context=context)
        putter.doMatcher()
        response = putter.doResponse()
        self.assertEqual(response.stub.payload,
                         {'request': {'bodyPatterns': {'contains': [u'<path><to><a>***</a></to></path>']},
                                      'method': 'POST'},
                          'response': {
                              'body': u'<x xmlns:user="http://www.my.com/userschema">'
                                      u'<user:response>ABC</user:response></x>',
                              'status': 200}})
class TestXMLManglerGetResponse(unittest.TestCase):
    """Tests for XMLManglerGetResponse: mangling at get/response time."""
    def _make(self, **kwargs):
        # Late import so test collection does not require the exit module.
        from stubo.ext.xmlexit import XMLManglerGetResponse
        return XMLManglerGetResponse(**kwargs)
    def test_ctor(self):
        """The constructor stores the supplied mangler unchanged."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>xyz</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        self.assertEqual(getter.mangler, mangler)
    def test_matcher(self):
        """doMatcher substitutes live request values into '***' matchers."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>***</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        response = getter.doMatcher()
        self.assertEqual(response.stub.contains_matchers(),
                         [u'<path><to><a>xyz</a></to></path>'])
    def test_matcher_with_request_ns(self):
        """Request namespaces are stripped before the matcher substitution."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path xmlns="http://great.com"><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>***</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        result = getter.doMatcher()
        self.assertEqual(result.stub.contains_matchers(),
                         [u'<path><to><a>xyz</a></to></path>'])
    def test_matcher2(self):
        """The stub's stored (extracted) value is preserved by doMatcher."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a',
                             extractor=lambda x: x[1:-1])))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>y</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        response = getter.doMatcher()
        self.assertEqual(response.stub.contains_matchers(),
                         [u'<path><to><a>y</a></to></path>'])
    def test_matcher_request(self):
        """doMatcherRequest leaves matchers alone and keeps the request body."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>***</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        response = getter.doMatcherRequest()
        self.assertEqual(response.stub.contains_matchers(),
                         [u'<path><to><a>***</a></to></path>'])
        self.assertEqual(response.request.request_body(),
                         '<path><to><a>xyz</a></to></path>')
    def test_matcher_request_with_ns(self):
        """doMatcherRequest strips namespaces from the incoming request."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a')))
        request = DummyModel(body='<path xmlns="http://great.com"><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>***</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        result = getter.doMatcherRequest()
        self.assertEqual(result.stub.contains_matchers(),
                         [u'<path><to><a>***</a></to></path>'])
        self.assertEqual(result.request.request_body(),
                         '<path><to><a>xyz</a></to></path>')
    def test_matcher_request2(self):
        """doMatcherRequest applies the extractor to the request body."""
        from stubo.ext.xmlutils import XMLMangler
        mangler = XMLMangler(elements=dict(a=XPathValue('/path/to/a',
                             extractor=lambda x: x[1:-1])))
        request = DummyModel(body='<path><to><a>xyz</a></to></path>',
                             headers={})
        from stubo.model.stub import Stub, create
        stub = Stub(create('<path><to><a>***</a></to></path>',
                           '<response>abc</response>'), "foo")
        from stubo.ext.transformer import StuboTemplateProcessor
        context = dict(stub=stub, template_processor=StuboTemplateProcessor())
        getter = self._make(mangler=mangler, request=StuboRequest(request),
                            context=context)
        response = getter.doMatcherRequest()
        self.assertEqual(response.stub.contains_matchers(),
                         [u'<path><to><a>***</a></to></path>'])
        self.assertEqual(response.request.request_body(),
                         '<path><to><a>y</a></to></path>')
| gpl-3.0 |
gkroch/mbed-os | tools/config.py | 2 | 40308 | """
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from copy import deepcopy
import os
import sys
from collections import namedtuple
from os.path import splitext
from intelhex import IntelHex
# Implementation of mbed configuration mechanism
from tools.utils import json_file_to_dict, intelhex_offset
from tools.arm_pack_manager import Cache
from tools.targets import CUMULATIVE_ATTRIBUTES, TARGET_MAP, \
generate_py_target, get_resolution_order
# Base class for all configuration exceptions
class ConfigException(Exception):
    """Exception type reserved for the mbed configuration system.

    A dedicated class makes configuration failures easy to tell apart
    from other errors.
    """
class ConfigParameter(object):
    """This class keeps information about a single configuration parameter"""

    def __init__(self, name, data, unit_name, unit_kind):
        """Construct a ConfigParameter

        Positional arguments:
        name - the name of the configuration parameter
        data - the data associated with the configuration parameter
        unit_name - the unit (target/library/application) that defines this
                    parameter
        unit_kind - the kind of the unit ("target", "library" or "application")
        """
        self.name = self.get_full_name(name, unit_name, unit_kind,
                                       allow_prefix=False)
        self.defined_by = self.get_display_name(unit_name, unit_kind)
        self.set_value(data.get("value", None), unit_name, unit_kind)
        self.help_text = data.get("help", None)
        self.required = data.get("required", False)
        self.macro_name = data.get("macro_name", "MBED_CONF_%s" %
                                   self.sanitize(self.name.upper()))
        self.config_errors = []

    @staticmethod
    def get_full_name(name, unit_name, unit_kind, label=None,
                      allow_prefix=True):
        """Return the full (prefixed) name of a parameter. If the parameter
        already has a prefix, check if it is valid

        Positional arguments:
        name - the simple (unqualified) name of the parameter
        unit_name - the unit (target/library/application) that defines this
                    parameter
        unit_kind - the kind of the unit ("target", "library" or "application")

        Keyword arguments:
        label - the name of the label in the 'target_config_overrides' section
        allow_prefix - True to allow the original name to have a prefix, False
                       otherwise
        """
        if name.find('.') == -1: # the name is not prefixed
            if unit_kind == "target":
                prefix = "target."
            elif unit_kind == "application":
                prefix = "app."
            else:
                prefix = unit_name + '.'
            return prefix + name
        # The name has a prefix, so check if it is valid
        if not allow_prefix:
            raise ConfigException("Invalid parameter name '%s' in '%s'" %
                                  (name, ConfigParameter.get_display_name(
                                      unit_name, unit_kind, label)))
        temp = name.split(".")
        # Check if the parameter syntax is correct (must be
        # unit_name.parameter_name)
        if len(temp) != 2:
            raise ConfigException("Invalid parameter name '%s' in '%s'" %
                                  (name, ConfigParameter.get_display_name(
                                      unit_name, unit_kind, label)))
        prefix = temp[0]
        # Check if the given parameter prefix matches the expected prefix
        if (unit_kind == "library" and prefix != unit_name) or \
           (unit_kind == "target" and prefix != "target"):
            raise ConfigException(
                "Invalid prefix '%s' for parameter name '%s' in '%s'" %
                (prefix, name, ConfigParameter.get_display_name(
                    unit_name, unit_kind, label)))
        return name

    @staticmethod
    def get_display_name(unit_name, unit_kind, label=None):
        """Return the name displayed for a unit when interrogating the origin
        and the last set place of a parameter

        Positional arguments:
        unit_name - the unit (target/library/application) that defines this
                    parameter
        unit_kind - the kind of the unit ("target", "library" or "application")

        Keyword arguments:
        label - the name of the label in the 'target_config_overrides' section
        """
        if unit_kind == "target":
            return "target:" + unit_name
        elif unit_kind == "application":
            return "application%s" % ("[%s]" % label if label else "")
        else: # library
            return "library:%s%s" % (unit_name, "[%s]" % label if label else "")

    @staticmethod
    def sanitize(name):
        """ "Sanitize" a name so that it is a valid C macro name. Currently it
        simply replaces '.' and '-' with '_'.

        Positional arguments:
        name - the name to make into a valid C macro
        """
        return name.replace('.', '_').replace('-', '_')

    def set_value(self, value, unit_name, unit_kind, label=None):
        """ Sets a value for this parameter, remember the place where it was
        set.  If the value is a Boolean, it is converted to 1 (for True) or
        to 0 (for False).

        Positional arguments:
        value - the value of the parameter
        unit_name - the unit (target/library/application) that defines this
                    parameter
        unit_kind - the kind of the unit ("target", "library" or "application")

        Keyword arguments:
        label - the name of the label in the 'target_config_overrides' section
               (optional)
        """
        self.value = int(value) if isinstance(value, bool) else value
        self.set_by = self.get_display_name(unit_name, unit_kind, label)

    def __str__(self):
        """Return the string representation of this configuration parameter

        Arguments: None
        """
        if self.value is not None:
            return '%s = %s (macro name: "%s")' % \
                (self.name, self.value, self.macro_name)
        else:
            return '%s has no value' % self.name

    def get_verbose_description(self):
        """Return a verbose description of this configuration parameter as a
        string

        Arguments: None
        """
        desc = "Name: %s%s\n" % \
               (self.name, " (required parameter)" if self.required else "")
        if self.help_text:
            desc += "    Description: %s\n" % self.help_text
        desc += "    Defined by: %s\n" % self.defined_by
        # Bug fix: test for None explicitly (consistent with __str__) so that
        # legitimately-set falsy values such as 0 or "" are not reported as
        # "No value set".
        if self.value is None:
            return desc + "    No value set"
        desc += "    Macro name: %s\n" % self.macro_name
        desc += "    Value: %s (set by %s)" % (self.value, self.set_by)
        return desc
class ConfigMacro(object):
    """Representation of one macro, either bare (MACRO) or valued
    (MACRO=VALUE)."""

    def __init__(self, name, unit_name, unit_kind):
        """Parse *name* into macro_name/macro_value and record its origin.

        Positional arguments:
        name - the macro definition, "NAME" or "NAME=VALUE"
        unit_name - the location where the macro was defined
        unit_kind - the type of unit defining the macro
        """
        self.name = name
        self.defined_by = ConfigParameter.get_display_name(unit_name, unit_kind)
        if "=" in name:
            parts = name.split("=")
            # Exactly one '=' is allowed in a valued macro definition.
            if len(parts) != 2:
                raise ValueError("Invalid macro definition '%s' in '%s'" %
                                 (name, self.defined_by))
            self.macro_name, self.macro_value = parts
        else:
            self.macro_name = name
            self.macro_value = None
class ConfigCumulativeOverride(object):
    """Tracks add/remove/strict overrides for one cumulative target
    attribute."""

    def __init__(self, name, additions=None, removals=None, strict=False):
        """Create an override set for the attribute called *name*.

        Keyword arguments:
        additions - initial names to add to the attribute
        removals - initial names to remove from the attribute
        strict - when True, additions are restricted to the current set
        """
        self.name = name
        self.additions = set(additions) if additions else set()
        self.removals = set(removals) if removals else set()
        self.strict = strict

    def remove_cumulative_overrides(self, overrides):
        """Mark every name in *overrides* for removal from the attribute."""
        for override in overrides:
            # A name cannot be scheduled for both addition and removal.
            if override in self.additions:
                raise ConfigException(
                    "Configuration conflict. The %s %s both added and removed."
                    % (self.name[:-1], override))
        self.removals |= set(overrides)

    def add_cumulative_overrides(self, overrides):
        """Mark every name in *overrides* for addition to the attribute."""
        for override in overrides:
            # Reject names already removed, or (in strict mode) unknown ones.
            if override in self.removals or \
               (self.strict and override not in self.additions):
                raise ConfigException(
                    "Configuration conflict. The %s %s both added and removed."
                    % (self.name[:-1], override))
        self.additions |= set(overrides)

    def strict_cumulative_overrides(self, overrides):
        """Replace the attribute entirely with *overrides* when evaluated."""
        self.remove_cumulative_overrides(self.additions - set(overrides))
        self.add_cumulative_overrides(overrides)
        self.strict = True

    def update_target(self, target):
        """Apply this override to *target*'s attribute in place."""
        current = set(getattr(target, self.name, []))
        setattr(target, self.name,
                list((current | self.additions) - self.removals))
def _process_config_parameters(data, params, unit_name, unit_kind):
    """Process a "config_parameters" section in either a target, a library,
    or the application.

    Positional arguments:
    data - a dictionary with the configuration parameters
    params - storage for the discovered configuration parameters; mutated
             in place and also returned for convenience
    unit_name - the unit (target/library/application) that defines this
                parameter
    unit_kind - the kind of the unit ("target", "library" or "application")
    """
    for name, val in data.items():
        full_name = ConfigParameter.get_full_name(name, unit_name, unit_kind)
        # If the parameter was already defined, raise an error
        if full_name in params:
            raise ConfigException(
                "Parameter name '%s' defined in both '%s' and '%s'" %
                (name, ConfigParameter.get_display_name(unit_name, unit_kind),
                 params[full_name].defined_by))
        # Otherwise add it to the list of known parameters
        # If "val" is not a dictionary, this is a shortcut definition,
        # otherwise it is a full definition
        params[full_name] = ConfigParameter(name, val if isinstance(val, dict)
                                            else {"value": val}, unit_name,
                                            unit_kind)
    return params
def _process_macros(mlist, macros, unit_name, unit_kind):
    """Process a macro definition and check for incompatible duplicate
    definitions.

    Positional arguments:
    mlist - list of macro names to process
    macros - dictionary with currently discovered macros; mutated in place
    unit_name - the unit (library/application) that defines this macro
    unit_kind - the kind of the unit ("library" or "application")
    """
    for mname in mlist:
        macro = ConfigMacro(mname, unit_name, unit_kind)
        # Two definitions of the same macro are only compatible when their
        # full definition strings are identical.
        if (macro.macro_name in macros) and \
           (macros[macro.macro_name].name != mname):
            # Found an incompatible definition of the macro in another module,
            # so raise an error
            full_unit_name = ConfigParameter.get_display_name(unit_name,
                                                              unit_kind)
            raise ConfigException(
                ("Macro '%s' defined in both '%s' and '%s'"
                 % (macro.macro_name, macros[macro.macro_name].defined_by,
                    full_unit_name)) +
                " with incompatible values")
        macros[macro.macro_name] = macro
def check_dict_types(dict, type_dict, dict_loc):
    """Validate that every value in *dict* has the type required by
    *type_dict*, raising ConfigException (naming *dict_loc*) otherwise.
    """
    # NOTE: the first parameter shadows the builtin `dict`; the name is
    # kept for interface compatibility with existing callers.
    for key, value in dict.iteritems():
        expected = type_dict[key]
        if isinstance(value, expected):
            continue
        raise ConfigException("The value of %s.%s is not of type %s" %
                              (dict_loc, key, expected.__name__))
# ROM region descriptor used by Config.regions:
#   name - region label ("bootloader"/"application"/"post_application"),
#   start/size - address range, active - True for the region the current
#   build is linked into, filename - backing image path or None.
Region = namedtuple("Region", "name start size active filename")
class Config(object):
    """'Config' implements the mbed configuration mechanism"""

    # Libraries and applications have different names for their configuration
    # files
    __mbed_app_config_name = "mbed_app.json"
    __mbed_lib_config_name = "mbed_lib.json"

    # Allowed keys in configuration dictionaries, and their types
    # (targets can have any kind of keys, so this validation is not applicable
    # to them)
    __allowed_keys = {
        "library": {"name": str, "config": dict, "target_overrides": dict,
                    "macros": list, "__config_path": str},
        "application": {"config": dict, "target_overrides": dict,
                        "macros": list, "__config_path": str}
    }

    # Overrides consumed by the bootloader/region logic rather than by the
    # parameter system; they are skipped by the override validation.
    __unused_overrides = set(["target.bootloader_img", "target.restrict_size"])

    # Allowed features in configurations
    __allowed_features = [
        "UVISOR", "BLE", "CLIENT", "IPV4", "LWIP", "COMMON_PAL", "STORAGE", "NANOSTACK",
        # Nanostack configurations
        "LOWPAN_BORDER_ROUTER", "LOWPAN_HOST", "LOWPAN_ROUTER", "NANOSTACK_FULL", "THREAD_BORDER_ROUTER", "THREAD_END_DEVICE", "THREAD_ROUTER", "ETHERNET_HOST"
    ]
def __init__(self, tgt, top_level_dirs=None, app_config=None):
    """Construct a mbed configuration

    Positional arguments:
    tgt - the name of the mbed target used for this configuration
          instance, or an already-constructed target object

    Keyword arguments:
    top_level_dirs - a list of top level source directories (where
                     mbed_app_config.json could be found)
    app_config - location of a chosen mbed_app.json file

    NOTE: Construction of a Config object will look for the application
    configuration file in top_level_dirs. If found once, it'll parse it.
    top_level_dirs may be None (in this case, the constructor will not
    search for a configuration file).
    """
    config_errors = []
    app_config_location = app_config
    if app_config_location is None:
        # Search every top-level directory; finding the app config twice
        # is ambiguous and therefore an error.
        for directory in top_level_dirs or []:
            full_path = os.path.join(directory, self.__mbed_app_config_name)
            if os.path.isfile(full_path):
                if app_config_location is not None:
                    raise ConfigException("Duplicate '%s' file in '%s' and '%s'"
                                          % (self.__mbed_app_config_name,
                                             app_config_location, full_path))
                else:
                    app_config_location = full_path
    try:
        self.app_config_data = json_file_to_dict(app_config_location) \
                               if app_config_location else {}
    except ValueError as exc:
        # Continue with an empty config; the parse error is reported later
        # through self.config_errors / validate_config.
        self.app_config_data = {}
        config_errors.append(
            ConfigException("Could not parse mbed app configuration from %s"
                            % app_config_location))
    # Check the keys in the application configuration data
    unknown_keys = set(self.app_config_data.keys()) - \
                   set(self.__allowed_keys["application"].keys())
    if unknown_keys:
        raise ConfigException("Unknown key(s) '%s' in %s" %
                              (",".join(unknown_keys),
                               self.__mbed_app_config_name))
    check_dict_types(self.app_config_data, self.__allowed_keys["application"],
                     "app-config")
    # Update the list of targets with the ones defined in the application
    # config, if applicable
    self.lib_config_data = {}
    # Make sure that each config is processed only once
    self.processed_configs = {}
    if isinstance(tgt, basestring):
        if tgt in TARGET_MAP:
            self.target = TARGET_MAP[tgt]
        else:
            # Not a built-in target name: build it from the app config's
            # "custom_targets" section.
            self.target = generate_py_target(
                self.app_config_data.get("custom_targets", {}), tgt)
    else:
        # tgt is assumed to be an already-constructed target object.
        self.target = tgt
    # Work on a copy so cumulative overrides never mutate a shared
    # TARGET_MAP entry.
    self.target = deepcopy(self.target)
    self.target_labels = self.target.labels
    self.cumulative_overrides = {key: ConfigCumulativeOverride(key)
                                 for key in CUMULATIVE_ATTRIBUTES}
    self._process_config_and_overrides(self.app_config_data, {}, "app",
                                       "application")
    self.config_errors = config_errors
def add_config_files(self, flist):
    """Add configuration files

    Positional arguments:
    flist - a list of files to add to this configuration

    Only files named like the library configuration file are considered;
    each path is processed at most once. Raises ConfigException on a
    missing "name" field or a duplicate library name.
    """
    for config_file in flist:
        if not config_file.endswith(self.__mbed_lib_config_name):
            continue
        full_path = os.path.normpath(os.path.abspath(config_file))
        # Check that we didn't already process this file
        # (fix: dict.has_key is deprecated/removed in Python 3; use `in`)
        if full_path in self.processed_configs:
            continue
        self.processed_configs[full_path] = True
        # Read the library configuration and add a "__config_path"
        # attribute to it
        try:
            cfg = json_file_to_dict(config_file)
        except ValueError as exc:
            # Best-effort: report the broken file and keep scanning.
            sys.stderr.write(str(exc) + "\n")
            continue
        cfg["__config_path"] = full_path
        if "name" not in cfg:
            raise ConfigException(
                "Library configured at %s has no name field." % full_path)
        # If there's already a configuration for a module with the same
        # name, exit with error
        if cfg["name"] in self.lib_config_data:
            raise ConfigException(
                "Library name '%s' is not unique (defined in '%s' and '%s')"
                % (cfg["name"], full_path,
                   self.lib_config_data[cfg["name"]]["__config_path"]))
        self.lib_config_data[cfg["name"]] = cfg
@property
def has_regions(self):
"""Does this config have regions defined?"""
if 'target_overrides' in self.app_config_data:
target_overrides = self.app_config_data['target_overrides'].get(
self.target.name, {})
return ('target.bootloader_img' in target_overrides or
'target.restrict_size' in target_overrides)
else:
return False
@property
def regions(self):
    """Generate a list of regions from the config

    Yields Region namedtuples for the bootloader (when an image is given),
    the application, and (when the application size is restricted) the
    remaining post-application area.

    Raises ConfigException when the target does not support a bootloader
    or the CMSIS pack lacks ROM information.
    """
    if not self.target.bootloader_supported:
        raise ConfigException("Bootloader not supported on this target.")
    cmsis_part = Cache(False, False).index[self.target.device_name]
    # Running offset (relative to rom_start) of the next region to emit.
    start = 0
    target_overrides = self.app_config_data['target_overrides'].get(
        self.target.name, {})
    try:
        rom_size = int(cmsis_part['memory']['IROM1']['size'], 0)
        rom_start = int(cmsis_part['memory']['IROM1']['start'], 0)
    except KeyError:
        raise ConfigException("Not enough information in CMSIS packs to "
                              "build a bootloader project")
    if 'target.bootloader_img' in target_overrides:
        # A prebuilt bootloader image goes first and must start exactly at
        # the beginning of ROM.
        filename = target_overrides['target.bootloader_img']
        part = intelhex_offset(filename, offset=rom_start)
        if part.minaddr() != rom_start:
            raise ConfigException("bootloader executable does not "
                                  "start at 0x%x" % rom_start)
        part_size = (part.maxaddr() - part.minaddr()) + 1
        yield Region("bootloader", rom_start + start, part_size, False,
                     filename)
        start += part_size
    if 'target.restrict_size' in target_overrides:
        # The application is capped to a fixed size; the rest of ROM
        # becomes an inactive "post_application" region.
        new_size = int(target_overrides['target.restrict_size'], 0)
        yield Region("application", rom_start + start, new_size, True, None)
        start += new_size
        yield Region("post_application", rom_start + start, rom_size - start,
                     False, None)
    else:
        yield Region("application", rom_start + start, rom_size - start,
                     True, None)
    # NOTE(review): this overflow check only runs if the generator is
    # consumed past the final yield -- confirm callers exhaust it.
    if start > rom_size:
        raise ConfigException("Not enough memory on device to fit all "
                              "application regions")
def _process_config_and_overrides(self, data, params, unit_name, unit_kind):
    """Process "config_parameters" and "target_config_overrides" into a
    given dictionary

    Positional arguments:
    data - the configuration data of the library/appliation
    params - storage for the discovered configuration parameters; mutated
             in place and returned
    unit_name - the unit (library/application) that defines this parameter
    unit_kind - the kind of the unit ("library" or "application")
    """
    # NOTE(review): this resets any errors collected by a previous call --
    # callers appear to read self.config_errors right afterwards.
    self.config_errors = []
    _process_config_parameters(data.get("config", {}), params, unit_name,
                               unit_kind)
    for label, overrides in data.get("target_overrides", {}).items():
        # If the label is defined by the target or it has the special value
        # "*", process the overrides
        if (label == '*') or (label in self.target_labels):
            # Check for invalid cumulative overrides in libraries
            if (unit_kind == 'library' and
                any(attr.startswith('target.extra_labels') for attr
                    in overrides.iterkeys())):
                raise ConfigException(
                    "Target override 'target.extra_labels' in " +
                    ConfigParameter.get_display_name(unit_name, unit_kind,
                                                     label) +
                    " is only allowed at the application level")
            # Parse out cumulative overrides: "<attr>" replaces the whole
            # attribute, "<attr>_add"/"<attr>_remove" adjust it. Each key
            # is deleted once handled so the generic override loop below
            # does not see it again.
            for attr, cumulatives in self.cumulative_overrides.iteritems():
                if 'target.'+attr in overrides:
                    key = 'target.' + attr
                    if not isinstance(overrides[key], list):
                        raise ConfigException(
                            "The value of %s.%s is not of type %s" %
                            (unit_name, "target_overrides." + key,
                             "list"))
                    cumulatives.strict_cumulative_overrides(overrides[key])
                    del overrides[key]
                if 'target.'+attr+'_add' in overrides:
                    key = 'target.' + attr + "_add"
                    if not isinstance(overrides[key], list):
                        raise ConfigException(
                            "The value of %s.%s is not of type %s" %
                            (unit_name, "target_overrides." + key,
                             "list"))
                    cumulatives.add_cumulative_overrides(overrides[key])
                    del overrides[key]
                if 'target.'+attr+'_remove' in overrides:
                    key = 'target.' + attr + "_remove"
                    if not isinstance(overrides[key], list):
                        raise ConfigException(
                            "The value of %s.%s is not of type %s" %
                            (unit_name, "target_overrides." + key,
                             "list"))
                    cumulatives.remove_cumulative_overrides(overrides[key])
                    del overrides[key]
            # Consider the others as overrides
            for name, val in overrides.items():
                # Get the full name of the parameter
                full_name = ConfigParameter.get_full_name(name, unit_name,
                                                          unit_kind, label)
                if full_name in params:
                    params[full_name].set_value(val, unit_name, unit_kind,
                                                label)
                elif name in self.__unused_overrides:
                    # Bootloader/region overrides are consumed elsewhere.
                    pass
                else:
                    self.config_errors.append(
                        ConfigException(
                            "Attempt to override undefined parameter" +
                            (" '%s' in '%s'"
                             % (full_name,
                                ConfigParameter.get_display_name(unit_name,
                                                                 unit_kind,
                                                                 label)))))
    # Apply every cumulative override to the (copied) target.
    for cumulatives in self.cumulative_overrides.itervalues():
        cumulatives.update_target(self.target)
    return params
def get_target_config_data(self):
    """Read and interpret configuration data defined by targets.

    We consider the resolution order for our target and sort it by level
    reversed, so that we first look at the top level target (the parent),
    then its direct children, then the children of those children and so
    on, until we reach self.target

    TODO: this might not work so well in some multiple inheritance
    scenarios

    At each step, look at two keys of the target data:
    - config_parameters: used to define new configuration parameters
    - config_overrides: used to override already defined configuration
      parameters

    Arguments: None
    """
    params, json_data = {}, self.target.json_data
    resolution_order = [e[0] for e
                        in sorted(
                            self.target.resolution_order,
                            key=lambda e: e[1], reverse=True)]
    for tname in resolution_order:
        # Read the target data directly from its description
        target_data = json_data[tname]
        # Process definitions first
        _process_config_parameters(target_data.get("config", {}), params,
                                   tname, "target")
        # Then process overrides
        for name, val in target_data.get("overrides", {}).items():
            full_name = ConfigParameter.get_full_name(name, tname, "target")
            # If the parameter name is not defined or if there isn't a path
            # from this target to the target where the parameter was defined
            # in the target inheritance tree, raise an error We need to use
            # 'defined_by[7:]' to remove the "target:" prefix from
            # defined_by
            rel_names = [tgt for tgt, _ in
                         get_resolution_order(self.target.json_data, tname,
                                              [])]
            if full_name in self.__unused_overrides:
                # Region-related overrides are consumed elsewhere.
                continue
            if (full_name not in params) or \
               (params[full_name].defined_by[7:] not in rel_names):
                raise ConfigException(
                    "Attempt to override undefined parameter '%s' in '%s'"
                    % (name,
                       ConfigParameter.get_display_name(tname, "target")))
            # Otherwise update the value of the parameter
            params[full_name].set_value(val, tname, "target")
    return params
def get_lib_config_data(self):
    """ Read and interpret configuration data defined by libraries. It is
    assumed that "add_config_files" above was already called and the
    library configuration data exists in self.lib_config_data

    Returns a (params, macros) tuple.

    Arguments: None
    """
    all_params, macros = {}, {}
    allowed = self.__allowed_keys["library"]
    for lib_name, lib_data in self.lib_config_data.items():
        # Reject keys that are not part of the library config schema.
        extra_keys = set(lib_data.keys()) - set(allowed.keys())
        if extra_keys:
            raise ConfigException("Unknown key(s) '%s' in %s" %
                                  (",".join(extra_keys), lib_name))
        check_dict_types(lib_data, allowed, lib_name)
        lib_params = self._process_config_and_overrides(
            lib_data, {}, lib_name, "library")
        all_params.update(lib_params)
        _process_macros(lib_data.get("macros", []), macros, lib_name,
                        "library")
    return all_params, macros
def get_app_config_data(self, params, macros):
    """ Read and interpret the configuration data defined by the
    application. The application can override any configuration parameter
    as well as define its own configuration data and macros.

    Positional arguments.
    params - the dictionary with configuration parameters found so far (in
             the target and in libraries); updated in place
    macros - the dictionary of macros defined in the configuration;
             updated in place
    """
    # An application is processed exactly like a library, with
    # "app"/"application" as the defining unit.
    self._process_config_and_overrides(self.app_config_data, params,
                                       "app", "application")
    _process_macros(self.app_config_data.get("macros", []), macros,
                    "app", "application")
def get_config_data(self):
""" Return the configuration data in two parts: (params, macros)
params - a dictionary with mapping a name to a ConfigParam
macros - the list of macros defined with "macros" in libraries and in
the application (as ConfigMacro instances)
Arguments: None
"""
all_params = self.get_target_config_data()
lib_params, macros = self.get_lib_config_data()
all_params.update(lib_params)
self.get_app_config_data(all_params, macros)
return all_params, macros
@staticmethod
def _check_required_parameters(params):
"""Check that there are no required parameters without a value
Positional arguments:
params - the list of parameters to check
NOTE: This function does not return. Instead, it throws a
ConfigException when any of the required parameters are missing values
"""
for param in params.values():
if param.required and (param.value is None):
raise ConfigException("Required parameter '" + param.name +
"' defined by '" + param.defined_by +
"' doesn't have a value")
@staticmethod
def parameters_to_macros(params):
""" Encode the configuration parameters as C macro definitions.
Positional arguments:
params - a dictionary mapping a name to a ConfigParameter
Return: a list of strings that encode the configuration parameters as
C pre-processor macros
"""
return ['%s=%s' % (m.macro_name, m.value) for m in params.values()
if m.value is not None]
@staticmethod
def config_macros_to_macros(macros):
""" Return the macro definitions generated for a dictionary of
ConfigMacros (as returned by get_config_data).
Positional arguments:
params - a dictionary mapping a name to a ConfigMacro instance
Return: a list of strings that are the C pre-processor macros
"""
return [m.name for m in macros.values()]
@staticmethod
def config_to_macros(config):
    """Convert the configuration data to a list of C macros

    Positional arguments:
    config - configuration data as (ConfigParam instances, ConfigMacro
             instances) tuple (as returned by get_config_data())

    Raises ConfigException when a required parameter has no value.
    """
    params, macros = config
    Config._check_required_parameters(params)
    result = Config.config_macros_to_macros(macros)
    result += Config.parameters_to_macros(params)
    return result
def get_config_data_macros(self):
""" Convert a Config object to a list of C macros
Arguments: None
"""
return self.config_to_macros(self.get_config_data())
def get_features(self):
    """ Extract any features from the configuration data

    Returns the feature list of the (possibly overridden) target.

    Raises ConfigException when a required parameter has no value or an
    unknown feature is requested.

    Arguments: None
    """
    params, _ = self.get_config_data()
    self._check_required_parameters(params)
    # Apply any cumulative "features" overrides before validating.
    self.cumulative_overrides['features']\
        .update_target(self.target)
    for feature in self.target.features:
        if feature not in self.__allowed_features:
            # Fix: message previously read "not a supported features".
            raise ConfigException(
                "Feature '%s' is not a supported feature" % feature)
    return self.target.features
def validate_config(self):
""" Validate configuration settings. This either returns True or
raises an exception
Arguments: None
"""
if self.config_errors:
raise self.config_errors[0]
return True
def load_resources(self, resources):
    """ Load configuration data from a Resources instance and expand it
    based on defined features.

    Positional arguments:
    resources - the resources object to load from and expand; returned
                after expansion
    """
    # Update configuration files until added features creates no changes
    prev_features = set()
    self.validate_config()
    while True:
        # Add/update the configuration with any .json files found while
        # scanning
        self.add_config_files(resources.json_files)
        # Add features while we find new ones
        features = set(self.get_features())
        if features == prev_features:
            # Fixed point reached: no new features were enabled.
            break
        for feature in features:
            if feature in resources.features:
                resources.add(resources.features[feature])
        prev_features = features
        self.validate_config()
    return resources
@staticmethod
def config_to_header(config, fname=None):
    """ Convert the configuration data to the content of a C header file,
    meant to be included to a C/C++ file. The content is returned as a
    string.

    Positional arguments:
    config - configuration data as (ConfigParam instances, ConfigMacro
             instances) tuple (as returned by get_config_data())

    Keyword arguments:
    fname - also write the content to the file called "fname".
            WARNING: if 'fname' names an existing file, it will be
            overwritten!
    """
    params, macros = config[0], config[1]
    Config._check_required_parameters(params)
    header_data = "// Automatically generated configuration file.\n"
    header_data += "// DO NOT EDIT, content will be overwritten.\n\n"
    header_data += "#ifndef __MBED_CONFIG_DATA__\n"
    header_data += "#define __MBED_CONFIG_DATA__\n\n"
    # Compute maximum length of macro names for proper alignment
    max_param_macro_name_len = (max([len(m.macro_name) for m
                                     in params.values()
                                     if m.value is not None])
                                if params else 0)
    max_direct_macro_name_len = (max([len(m.macro_name) for m
                                      in macros.values()])
                                 if macros else 0)
    max_macro_name_len = max(max_param_macro_name_len,
                             max_direct_macro_name_len)
    # Compute maximum length of macro values for proper alignment
    max_param_macro_val_len = (max([len(str(m.value)) for m
                                    in params.values()
                                    if m.value is not None])
                               if params else 0)
    max_direct_macro_val_len = max([len(m.macro_value or "") for m
                                    in macros.values()]) if macros else 0
    max_macro_val_len = max(max_param_macro_val_len,
                            max_direct_macro_val_len)
    # Generate config parameters first
    if params:
        header_data += "// Configuration parameters\n"
        for macro in params.values():
            if macro.value is not None:
                # "#define NAME VALUE // set by X", columns aligned.
                header_data += ("#define {0:<{1}} {2!s:<{3}} " +
                                "// set by {4}\n")\
                    .format(macro.macro_name, max_macro_name_len,
                            macro.value, max_macro_val_len, macro.set_by)
    # Then macros
    if macros:
        header_data += "// Macros\n"
        for macro in macros.values():
            if macro.macro_value:
                # Macro with a value.
                header_data += ("#define {0:<{1}} {2!s:<{3}}" +
                                " // defined by {4}\n")\
                    .format(macro.macro_name, max_macro_name_len,
                            macro.macro_value, max_macro_val_len,
                            macro.defined_by)
            else:
                # Valueless macro: pad so the trailing comments line up.
                header_data += ("#define {0:<{1}}" +
                                " // defined by {2}\n")\
                    .format(macro.macro_name,
                            max_macro_name_len + max_macro_val_len + 1,
                            macro.defined_by)
    header_data += "\n#endif\n"
    # If fname is given, write "header_data" to it
    if fname:
        with open(fname, "w+") as file_desc:
            file_desc.write(header_data)
    return header_data
def get_config_data_header(self, fname=None):
""" Convert a Config instance to the content of a C header file, meant
to be included to a C/C++ file. The content is returned as a string.
Keyword arguments:
fname - also write the content to the file called "fname".
WARNING: if 'fname' names an existing file, it will be
overwritten!
"""
return self.config_to_header(self.get_config_data(), fname)
| apache-2.0 |
pklejch/cronalyzer | CronPredictor.py | 1 | 29208 | import datetime
import re
import calendar
from itertools import tee
from CronJob import CCronJob
from datetime import timedelta
class CCronPredictor:
    """Predicts the future run times of a single crontab entry."""
def __init__(self, minute, hour, day, month, dayOfWeek, length, command, includeRareJobs=False):
    """Store the parsed crontab fields and initialise prediction state.

    Positional arguments:
    minute/hour/day/month/dayOfWeek - raw crontab field strings
    length - job duration value (stored as given, not interpreted here)
    command - the command the crontab entry runs

    Keyword arguments:
    includeRareJobs - when True, jobs covering fewer than 6 months are
                      widened to run every month (see _isRare)
    """
    self.minute = minute
    self.hour = hour
    self.day = day
    self.month = month
    self.dayOfWeek = dayOfWeek
    self.length = length
    self.command = command
    self.constantSpaces = True
    self.spaceBetweenRuns = None
    self.rareJob = False
    # does the day-of-month field cover every day ("*")?
    if(self.day == "*"):
        self._allDays = True
    else:
        self._allDays = False
    # does the day-of-week field cover every weekday ("*")?
    if(self.dayOfWeek == "*"):
        self._allDaysOfWeek = True
    else:
        self._allDaysOfWeek = False
    # is the day field a step expression ("*/2")?
    if re.match('\*/[0-9]+', self.day):
        self._multipleDays = True
    else:
        self._multipleDays = False
    # start from the current time
    self.nextTime = datetime.datetime.now()
    # zero out seconds and microseconds
    self.nextTime = self.nextTime.replace(second=0, microsecond=0)
    # remember the start time, used when computing the first iteration
    self.startTime = self.nextTime
    # reset the next-run candidate to the minimum (January 1st, 00:00)
    self.nextTime = self.nextTime.replace(minute=0, hour=0, day=1, month=1)
    # initialise the value sets, the generators and the first run time
    self._makeSets(self.minute, self.hour, self.day, self.month, self.dayOfWeek)
    self._isRare(includeRareJobs)
    self._makeGens()
    self._setFirstTime()
def _isRare(self, includeRareJobs):
    # A job whose month set covers fewer than 6 months is considered
    # "rare"; when rare jobs are requested, widen it to every month so
    # predictions are still produced.
    # NOTE(review): despite the name, this does not set self.rareJob --
    # confirm whether that flag should be updated here.
    if len(self.monthSet) < 6 and includeRareJobs:
        self.monthSet = list(range(1, 13))
def _makeMinuteSet(self, minute):
    """Parse the crontab minute field into a sorted list of minutes.

    NOTE(review): an unrecognised field leaves minuteSet unbound and the
    return raises NameError -- confirm the input is pre-validated.
    """
    # "*" -> every minute [0..59]
    if minute == "*":
        minuteSet = list(range(0, 60))
    # a single number, e.g. "5"
    elif re.match('^[0-9]+$', minute):
        minuteSet = [int(minute)]
    # a comma-separated list, e.g. "0,1,15,25"
    elif re.match('^([0-9]+,)+[0-9]+$', minute):
        minuteSet = sorted(list(set(map(int, minute.split(',')))))
    # a range, e.g. "10-15" -> [10..15]
    elif re.match('^[0-9]+-[0-9]+$', minute):
        fromTo = list(map(int, minute.split('-')))
        minuteSet = list(range(fromTo[0], fromTo[1]+1))
    # a step expression, e.g. "*/5" -> [0, 5, ..., 55]
    elif re.match('\*/[0-9]+', minute):
        # start with an empty list
        minuteSet = []
        # split "*/5" into its parts
        line = minute.split('/')
        # keep only the numeric step
        howOften = int(line[1])
        # all minutes 0..59
        allMinutes = list(range(0, 60))
        # keep every minute divisible by the step
        for i in allMinutes:
            if i%howOften == 0:
                minuteSet.append(i)
    # a range with a step, e.g. "0-20/5"
    elif re.match('^[0-9]+-[0-9]+/[0-9]+$', minute):
        # NOTE(review): cron steps from the start of the range
        # ("3-20/5" -> 3,8,13,18) while this keeps multiples of the step
        # within the range -- confirm the divergence is intended.
        minuteSet = []
        line = minute.split("/")
        howOften = int(line[1])
        fromTo = list(map(int, line[0].split('-')))
        allMinutes = list(range(fromTo[0], fromTo[1]+1))
        for i in allMinutes:
            if i%howOften == 0:
                minuteSet.append(i)
    # a mix of ranges and values, e.g. "1,10-15,17,19"
    elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$', minute):
        minuteSet = set()
        line = minute.split(",")
        for part in line:
            if re.match('^[0-9]+-[0-9]+$', part):
                fromTo = list(map(int, part.split('-')))
                subRange = list(range(fromTo[0], fromTo[1]+1))
                for i in subRange:
                    minuteSet.add(i)
            else:
                minuteSet.add(int(part))
        minuteSet = sorted(list(minuteSet))
    return minuteSet
def _makeDayOfWeekSet(self, dayOfWeek):
    """Parse the crontab day-of-week field into a sorted list of values.

    NOTE(review): single values and ranges accept 0-7; presumably 0 and 7
    both mean Sunday and are normalised later (see _cron2python) --
    confirm.
    """
    # "*" -> every weekday [0..6]
    if dayOfWeek == "*":
        dayOfWeekSet = list(range(0, 7))
    # a single number, e.g. "5"
    elif re.match('^[0-7]$', dayOfWeek):
        dayOfWeekSet = [int(dayOfWeek)]
    # a comma-separated list, e.g. "0,1,4,6"
    elif re.match('^([0-7],)+[0-7]$', dayOfWeek):
        dayOfWeekSet = sorted(list(set(map(int, dayOfWeek.split(',')))))
    # a range, e.g. "0-2" -> [0,1,2]
    elif re.match('^[0-7]-[0-7]$', dayOfWeek):
        fromTo = list(map(int, dayOfWeek.split('-')))
        dayOfWeekSet = list(range(fromTo[0], fromTo[1]+1))
    # a step expression, e.g. "*/2"
    elif re.match('\*/[0-9]+', dayOfWeek):
        # start with an empty list
        dayOfWeekSet = []
        # split "*/2" into its parts
        line = dayOfWeek.split('/')
        # keep only the numeric step
        howOften = int(line[1])
        # all weekdays 0..6
        allDaysOfWeek = list(range(0, 7))
        # keep every weekday divisible by the step
        for i in allDaysOfWeek:
            if i%howOften == 0:
                dayOfWeekSet.append(i)
    # a range with a step, e.g. "0-6/2"
    elif re.match('^[0-9]+-[0-9]+/[0-9]+$', dayOfWeek):
        dayOfWeekSet = []
        line = dayOfWeek.split("/")
        howOften = int(line[1])
        fromTo = list(map(int, line[0].split('-')))
        # (variable name kept from the minute parser this was copied from)
        allMinutes = list(range(fromTo[0], fromTo[1]+1))
        for i in allMinutes:
            if i%howOften == 0:
                dayOfWeekSet.append(i)
    # a mix of ranges and values, e.g. "1,3-5,6"
    elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$', dayOfWeek):
        dayOfWeekSet = set()
        line = dayOfWeek.split(",")
        for part in line:
            if re.match('^[0-9]+-[0-9]+$', part):
                fromTo = list(map(int, part.split('-')))
                subRange = list(range(fromTo[0], fromTo[1]+1))
                for i in subRange:
                    dayOfWeekSet.add(i)
            else:
                dayOfWeekSet.add(int(part))
        dayOfWeekSet = sorted(list(dayOfWeekSet))
    return dayOfWeekSet
def _makeHourSet(self, hour):
    """Parse the crontab hour field into a sorted list of hours."""
    # "*" -> every hour [0..23]
    if hour == "*":
        hourSet = list(range(0, 24))
    # a single number, e.g. "5"
    elif re.match('^[0-9]+$', hour):
        hourSet = [int(hour)]
    # a comma-separated list, e.g. "0,1,15,22"
    elif re.match('^([0-9]+,)+[0-9]+$', hour):
        hourSet = sorted(list(set(map(int, hour.split(',')))))
    # a range, e.g. "10-15"
    elif re.match('^[0-9]+-[0-9]+$', hour):
        fromTo = list(map(int, hour.split('-')))
        hourSet = list(range(fromTo[0], fromTo[1]+1))
    # a step expression, e.g. "*/5"
    elif re.match('\*/[0-9]+', hour):
        # start with an empty list
        hourSet = []
        # split "*/5" into its parts
        line = hour.split('/')
        # keep only the numeric step
        howOften = int(line[1])
        # all hours 0..23
        allHours = list(range(0, 24))
        # keep every hour divisible by the step
        for i in allHours:
            if i%howOften == 0:
                hourSet.append(i)
    # a range with a step, e.g. "0-20/5"
    elif re.match('^[0-9]+-[0-9]+/[0-9]+$', hour):
        # NOTE(review): keeps multiples of the step rather than stepping
        # from the start of the range as cron does -- confirm.
        hourSet = []
        line = hour.split("/")
        howOften = int(line[1])
        fromTo = list(map(int, line[0].split('-')))
        # (variable name kept from the minute parser this was copied from)
        allMinutes = list(range(fromTo[0], fromTo[1]+1))
        for i in allMinutes:
            if i%howOften == 0:
                hourSet.append(i)
    # a mix of ranges and values, e.g. "1,10-15,17,19"
    elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$', hour):
        hourSet = set()
        line = hour.split(",")
        for part in line:
            if re.match('^[0-9]+-[0-9]+$', part):
                fromTo = list(map(int, part.split('-')))
                subRange = list(range(fromTo[0], fromTo[1]+1))
                for i in subRange:
                    hourSet.add(i)
            else:
                hourSet.add(int(part))
        hourSet = sorted(list(hourSet))
    return hourSet
def _makeDaySet(self, day):
    """Parse the crontab day-of-month field into a sorted list of days.

    "*" and "*/n" produce an empty list here; they are expanded later,
    once the concrete month (and its length) is known -- see
    _adjustDaySetByMonth and _makeDaySetAfter.
    """
    # "*" -> filled in later from the actual month length
    if day == "*":
        daySet = []
    # a single number, e.g. "5"
    elif re.match('^[0-9]+$', day):
        daySet = [int(day)]
    # a comma-separated list, e.g. "1,15,25"
    elif re.match('^([0-9]+,)+[0-9]+$', day):
        daySet = sorted(list(set(map(int, day.split(',')))))
    # a range, e.g. "10-15"
    elif re.match('^[0-9]+-[0-9]+$', day):
        fromTo = list(map(int, day.split('-')))
        daySet = list(range(fromTo[0], fromTo[1]+1))
    # a step expression ("*/n"): deferred until the month is known
    elif re.match('\*/[0-9]+', day):
        daySet = []
    # a range with a step, e.g. "1-20/5"
    elif re.match('^[0-9]+-[0-9]+/[0-9]+$', day):
        # Days are 1-based, hence the == 1 test (unlike minutes/hours
        # which test == 0).
        # NOTE(review): cron steps from the start of the range -- confirm
        # the divergence is intended.
        daySet = []
        line = day.split("/")
        howOften = int(line[1])
        fromTo = list(map(int, line[0].split('-')))
        # (variable name kept from the minute parser this was copied from)
        allMinutes = list(range(fromTo[0], fromTo[1]+1))
        for i in allMinutes:
            if i%howOften == 1:
                daySet.append(i)
    # a mix of ranges and values, e.g. "1,10-15,17,19"
    elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$', day):
        daySet = set()
        line = day.split(",")
        for part in line:
            if re.match('^[0-9]+-[0-9]+$', part):
                fromTo = list(map(int, part.split('-')))
                subRange = list(range(fromTo[0], fromTo[1]+1))
                for i in subRange:
                    daySet.add(i)
            else:
                daySet.add(int(part))
        daySet = sorted(list(daySet))
    return daySet
def _makeDaySetAfter(self, day):
    """Expand a "*/n" day field once the current month is known."""
    # start with an empty list
    daySet = []
    # split "*/5" into its parts
    line = day.split('/')
    # keep only the numeric step
    howOften = int(line[1])
    # build the day list for the upcoming month and year
    self._adjustDaySetByMonth()
    allDays = self.daySet
    # keep the days matching the step; days are 1-based, so i % n == 1
    # selects 1, n+1, 2n+1, ... (cron's step sequence starting at day 1)
    for i in allDays:
        if i%howOften == 1:
            daySet.append(i)
    return daySet
def _makeMonthSet(self, month):
    """Parse the crontab month field into a sorted list of months."""
    # "*" -> every month [1..12]
    if month == "*":
        monthSet = list(range(1, 13))
    # a single number, e.g. "5"
    elif re.match('^[0-9]+$', month):
        monthSet = [int(month)]
    # a comma-separated list, e.g. "1,4,9"
    elif re.match('^([0-9]+,)+[0-9]+$', month):
        monthSet = sorted(list(set(map(int, month.split(',')))))
    # a range, e.g. "10-12"
    elif re.match('^[0-9]+-[0-9]+$', month):
        fromTo = list(map(int, month.split('-')))
        monthSet = list(range(fromTo[0], fromTo[1]+1))
    # a step expression, e.g. "*/3" -> [1, 4, 7, 10]
    elif re.match('\*/[0-9]+', month):
        # start with an empty list
        monthSet = []
        # split "*/3" into its parts
        line = month.split('/')
        # keep only the numeric step
        howOften = int(line[1])
        # all months 1..12
        allMonths = list(range(1, 13))
        # months are 1-based, so i % n == 1 selects 1, n+1, 2n+1, ...
        # (cron's step sequence starting at January)
        for i in allMonths:
            if i%howOften == 1:
                monthSet.append(i)
    # a range with a step, e.g. "1-10/3"
    elif re.match('^[0-9]+-[0-9]+/[0-9]+$', month):
        # NOTE(review): keeps values with i % step == 1 regardless of the
        # range start; cron steps from the start of the range -- confirm.
        monthSet = []
        line = month.split("/")
        howOften = int(line[1])
        fromTo = list(map(int, line[0].split('-')))
        # (variable name kept from the minute parser this was copied from)
        allMinutes = list(range(fromTo[0], fromTo[1]+1))
        for i in allMinutes:
            if i%howOften == 1:
                monthSet.append(i)
    # a mix of ranges and values, e.g. "1,4-6,9"
    elif re.match('^([0-9]+(-[0-9]+)?)(,([0-9]+(-[0-9]+)?))*$', month):
        monthSet = set()
        line = month.split(",")
        for part in line:
            if re.match('^[0-9]+-[0-9]+$', part):
                fromTo = list(map(int, part.split('-')))
                subRange = list(range(fromTo[0], fromTo[1]+1))
                for i in subRange:
                    monthSet.add(i)
            else:
                monthSet.add(int(part))
        monthSet = sorted(list(monthSet))
    return monthSet
def _adjustDaySet(self,month,year):
if(month == 1 or month == 3 or month == 5 or month == 7 or month == 8 or month == 10 or month == 12):
#mesic ma 31 dni
daySet=list(range(1,32))
elif (month == 4 or month == 6 or month == 9 or month == 11):
#mesic ma 30 dni
daySet=list(range(1,31))
else:
#je to unor a prestupny rok = 29 dni
if (calendar.isleap(year)):
daySet=list(range(1,30))
#je to unor a neprestupny rok = 28 dni
else:
daySet=list(range(1,29))
return daySet
def _generateMinutes(self,minuteSet):
    # Generator over the allowed minutes, in set order.
    yield from minuteSet
def _generateDayOfWeek(self,dayOfWeekSet):
    # Generator over the allowed days of week, in set order.
    yield from dayOfWeekSet
def _generateHours(self,hourSet):
    # Generator over the allowed hours, in set order.
    yield from hourSet
def _generateDays(self,daySet):
    # Generator over the allowed days of month, in set order.
    yield from daySet
def _generateMonths(self, monthSet):
    # Generator over the allowed months, in set order.
    yield from monthSet
def _nextMinute(self):
    # Advance nextTime to the next minute in the minute set.
    # Raises StopIteration when the minute generator is exhausted.
    self.nextTime=self.nextTime.replace(minute=next(self.minutes))
def _nextHour(self):
    # Advance nextTime to the next hour in the hour set.
    # Raises StopIteration when the hour generator is exhausted.
    self.nextTime=self.nextTime.replace(hour=next(self.hours))
def _adjustDaySetByMonth(self, save=True):
    """Build the day set for the month that follows the current position.

    Peeks at the next month in the month generator without consuming it
    (using itertools.tee); when the generator is exhausted, wraps around
    to the first month of the set and to the following year.
    If `save` is true the result is stored in self.daySet, otherwise it
    is returned.
    """
    # duplicate the month generator so the peek below can be undone
    self.months, prevMonths = tee(self.months)
    # try to look at the next month in the set
    try:
        nextYear=self.nextTime.year
        nextMonth=next(self.months)
    # the month set is exhausted
    except StopIteration:
        # reset the generator
        self.months=self._generateMonths(self.monthSet)
        # read the first month again
        nextMonth=next(self.months)
        # move on to the next year (nextYear was already read above)
        nextYear=nextYear+1
    # restore the generator to its state before the peek
    self.months=prevMonths
    # adjust the day set for the upcoming month;
    # either store it on the object or just return it
    if save:
        self.daySet=self._adjustDaySet(nextMonth, nextYear)
    else:
        return self._adjustDaySet(nextMonth, nextYear)
def _nextDay(self):
    """Advance nextTime to the next matching day.

    Four cases, depending on whether the day-of-month and day-of-week
    crontab fields were given as '*' (all) or as an explicit value,
    list or range.  Raises StopIteration when the day set runs out and
    no further adjustment is possible at this level.
    """
    # both day and day-of-week are '*'
    if self._allDays and self._allDaysOfWeek:
        # advance the day using the prepared day set
        self.nextTime=self.nextTime.replace(day=next(self.days))
    # all days (*), but day-of-week restricted (value, list or range)
    elif self._allDays and not self._allDaysOfWeek:
        # advance by +1 day until the weekday matches one from the set
        found=False
        while True:
            if found:
                break
            try:
                self.nextTime=self.nextTime.replace(day=next(self.days))
                self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
                for dayOfWeek in self.daysOfWeek:
                    dayOfWeek=self._cron2python(dayOfWeek)
                    if self.nextTime.weekday() == dayOfWeek:
                        found=True
                        break
            except StopIteration:
                # rebuild daySet for the following month when every day is allowed
                if self._allDays:
                    self._adjustDaySetByMonth()
                # or when day multiples ("*/n") were given
                elif self._multipleDays:
                    self.daySet=self._makeDaySetAfter(self.day)
                # reset the day generator
                self.days=self._generateDays(self.daySet)
                # advance one day
                self.nextTime=self.nextTime.replace(day=next(self.days))
                # try to advance the month as well
                try:
                    self._nextMonth()
                # end of the month set, the year moves too
                except StopIteration:
                    # reset the month generator
                    self.months=self._generateMonths(self.monthSet)
                    # advance the month
                    self._nextMonth()
                    # advance the year (there is no year set to check)
                    self._nextYear()
                self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
                for dayOfWeek in self.daysOfWeek:
                    dayOfWeek=self._cron2python(dayOfWeek)
                    if self.nextTime.weekday() == dayOfWeek:
                        found=True
                        break
        self._dateUsed=True
    # day restricted (value, list or range), day-of-week is '*'
    elif not self._allDays and self._allDaysOfWeek:
        # advance the day using the prepared day set
        self.nextTime=self.nextTime.replace(day=next(self.days))
    # both day and day-of-week restricted (value, list or range)
    else:
        # advance both until at least one condition holds, per cron
        # semantics: e.g. "10th of the month and Tuesday" fires on the
        # 10th OR on a Tuesday (the Tuesday need not be the 10th)
        #raise NotImplementedError
        found=False
        while True:
            if found:
                break
            try:
                self.nextTime=self.nextTime.replace(day=next(self._allDaysGen))
                # reset the day-of-week generator
                self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
                # scan the weekdays for a match
                for dayOfWeek in self.daysOfWeek:
                    dayOfWeek=self._cron2python(dayOfWeek)
                    if self.nextTime.weekday() == dayOfWeek:
                        found=True
                        break
                # reset the day-of-month generator
                self.days=self._generateDays(self.daySet)
                # scan the days of the month for a match
                for day in self.days:
                    if self.nextTime.day == day:
                        found=True
                        break
            except StopIteration:
                # build the helper set of all days for the next month
                self._allDaySet=self._adjustDaySetByMonth(False)
                # reset the helper all-days generator
                self._allDaysGen=self._generateDays(self._allDaySet)
                # advance one day
                self.nextTime=self.nextTime.replace(day=next(self._allDaysGen))
                # try to advance the month as well
                try:
                    self._nextMonth()
                # end of the month set, the year moves too
                except StopIteration:
                    # reset the month generator
                    self.months=self._generateMonths(self.monthSet)
                    # advance the month
                    self._nextMonth()
                    # advance the year (there is no year set to check)
                    self._nextYear()
                # reset the day-of-week generator
                self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
                # scan the weekdays for a match
                for dayOfWeek in self.daysOfWeek:
                    dayOfWeek=self._cron2python(dayOfWeek)
                    if self.nextTime.weekday() == dayOfWeek:
                        found=True
                        break
                # reset the day-of-month generator
                self.days=self._generateDays(self.daySet)
                # scan the days of the month for a match
                for day in self.days:
                    if self.nextTime.day == day:
                        found=True
                        break
        self._dateUsed=True
def _nextMonth(self):
    # Advance nextTime to the next month in the month set.
    # Returns False when the current day number does not exist in that
    # month (e.g. the 30th in February), so the caller can skip it.
    # Propagates StopIteration when the month generator is exhausted.
    try:
        self.nextTime=self.nextTime.replace(month=next(self.months))
        return True
    except ValueError:
        return False
def _nextYear(self):
    # Advance nextTime by one year (cron has no year field to constrain).
    currYear=self.nextTime.year
    self.nextTime=self.nextTime.replace(year=currYear+1)
def _cron2python(self, dayOfWeek):
    # Convert cron's day-of-week numbering (0/7 = Sunday) to Python's
    # datetime.weekday() numbering (0 = Monday).
    return (dayOfWeek+6)%7
def _makeSets(self, minute, hour, day, month, dayOfWeek):
    # Parse the raw crontab field strings into sorted sets (lists) of
    # allowed values, one per field.
    self.minuteSet=self._makeMinuteSet(minute)
    self.hourSet=self._makeHourSet(hour)
    self.monthSet=self._makeMonthSet(month)
    self.daySet=self._makeDaySet(day)
    self.dayOfWeekSet=self._makeDayOfWeekSet(dayOfWeek)
def _makeGens(self):
    # Build the value generators from the parsed sets, then fix up the
    # day set/generator for the special day-field spellings.
    self.minutes=self._generateMinutes(self.minuteSet)
    self.hours=self._generateHours(self.hourSet)
    self.days=self._generateDays(self.daySet)
    self.months=self._generateMonths(self.monthSet)
    self.daysOfWeek=self._generateDayOfWeek(self.dayOfWeekSet)
    # helper set of all days, used when both day and day-of-week are
    # restricted (the OR-semantics branch of _nextDay)
    if not self._allDays and not self._allDaysOfWeek:
        self._allDaySet=self._adjustDaySetByMonth(False)
        self._allDaysGen=self._generateDays(self._allDaySet)
    # re-adjust the day set for "*/5"-style entries and rebuild its generator
    if self._multipleDays:
        self.daySet=self._makeDaySetAfter(self.day)
        self.days=self._generateDays(self.daySet)
    # re-adjust the day set for "*" entries and rebuild its generator
    if self._allDays:
        # build the day set for the upcoming month and year
        self._adjustDaySetByMonth()
        # build the generator
        self.days=self._generateDays(self.daySet)
def _setFirstTime(self):
    # Compute the first scheduled occurrence at or after startTime.
    self._dateUsed=False
    # prime nextTime from the freshly-built generators
    self._nextMinute()
    self._nextHour()
    self._nextMonth()
    self._nextDay()
    # keep stepping until nextTime passes startTime
    while ((self.nextTime<=self.startTime)):
        self._dateUsed=False
        # advance the time
        self._predictNext()
    self._dateUsed=False
def _predictNext(self):
    """Advance nextTime to the next scheduled occurrence.

    Works like an odometer: an exhausted minute set carries into the
    hour, hours into the day, days into the month, months into the year.
    """
    try:
        # try to advance the minute
        self._nextMinute()
    except StopIteration:
        # end of the minute set, the hour moves too
        # reset the minute generator
        self.minutes=self._generateMinutes(self.minuteSet)
        # advance the minute again
        self._nextMinute()
        # try to advance the hour as well
        try:
            self._nextHour()
        # end of the hour set, the day moves too
        except StopIteration:
            # reset the hour generator
            self.hours=self._generateHours(self.hourSet)
            # advance the hour
            self._nextHour()
            # try to advance the day
            try:
                self._nextDay()
            # end of the day set, the month moves too
            except StopIteration:
                # rebuild daySet for the following month when every day is allowed
                if self._allDays:
                    self._adjustDaySetByMonth()
                # or when day multiples ("*/n") were given
                elif self._multipleDays:
                    self.daySet=self._makeDaySetAfter(self.day)
                # reset the day generator
                self.days=self._generateDays(self.daySet)
                # advance the day
                self._nextDay()
                # try to advance the month;
                # if the day is fixed (e.g. 30) and the month is too
                # short (e.g. February), skip it and try the next one
                try:
                    while not self._nextMonth():
                        pass
                # end of the month set, the year moves too
                except StopIteration:
                    # reset the month generator
                    self.months=self._generateMonths(self.monthSet)
                    # advance the month
                    self._nextMonth()
                    # advance the year (there is no year set to check)
                    self._nextYear()
def _printTime(self):
    # Debug helper: print nextTime as "HH:MM:SS dd.mm.yyyy".
    print(self.nextTime.strftime("%H:%M:%S %d.%m.%Y"))
def _printInterval(self,timeFrom,timeTo):
    # Debug helper: print a "start ---> end" interval.
    print(timeFrom.strftime("%H:%M:%S %d.%m.%Y")+" ---> "+timeTo.strftime("%H:%M:%S %d.%m.%Y"))
def _addTime(self,startTime,length):
    # Return startTime shifted forward by `length` seconds.
    lengthTimedelta=timedelta(seconds=length)
    endTime=startTime+lengthTimedelta
    return endTime
def iterate(self,n,cronJobList):
    """Append the next n scheduled runs (as CCronJob intervals of
    self.length seconds) to cronJobList and return it."""
    for _ in range(n):
        endTime=self._addTime(self.nextTime, self.length)
        cronJobList.append(CCronJob(self.nextTime,endTime))
        #self._printInterval(self.nextTime, endTime)
        self._predictNext()
        self._dateUsed=False
    return cronJobList
def iterateUntil(self,toDate,cronJobList):
    """Append scheduled runs up to (and including) toDate to cronJobList.

    Also tracks run spacing: spaceBetweenRuns holds the gap between
    consecutive runs while it stays constant; once two differing gaps
    are seen, constantSpaces is set False and spaceBetweenRuns to None.
    """
    prevDuration=None
    while self.nextTime<=toDate:
        prevTime=self.nextTime
        endTime=self._addTime(self.nextTime, self.length)
        cronJobList.append(CCronJob(self.nextTime,endTime))
        #self._printInterval(self.nextTime, endTime)
        self._predictNext()
        duration = self.nextTime - prevTime
        self.spaceBetweenRuns=duration
        if prevDuration is not None and prevDuration != duration:
            self.constantSpaces=False
            self.spaceBetweenRuns=None
        prevDuration = duration
        self._dateUsed=False
    return cronJobList
def test(self,name, n=100, outputDir="/home/petr/git/Cron Analyzer/test/"):
    """Write the next n predicted run times to <outputDir>output<name>.

    Fixes over the original: the output file is closed even when
    prediction raises (context manager), the hard-coded directory is now
    a backward-compatible keyword parameter, and the builtin `file` is
    no longer shadowed.
    """
    with open(outputDir+"output"+str(name),"w+") as out:
        for _ in range(n):
            out.write(self.nextTime.strftime("%Y-%m-%d %H:%M:%S")+"\n")
            self._predictNext()
            self._dateUsed=False
def getAllDays(self):
    # True when the day-of-month field was given as '*'.
    return self._allDays
def getAllDaysOfWeek(self):
    # True when the day-of-week field was given as '*'.
    return self._allDaysOfWeek
| gpl-3.0 |
laiqiqi886/kbengine | kbe/res/scripts/common/Lib/distutils/cygwinccompiler.py | 87 | 16471 | """distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate a import library for its dll
# - create a def-file for python??.dll
# - create a import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
#   --export-all-symbols because it didn't work reliably in some
#   tested configurations. And because other Windows compilers also
#   need their symbols specified, this is no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.
    """
    marker = sys.version.find('MSC v.')
    if marker == -1:
        # not an MSVC build: nothing to add
        return None
    msc_ver = sys.version[marker+6:marker+10]
    # compiler version -> runtime library name
    runtimes = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
        '1600': ['msvcr100'],  # VS2010 / MSVC 10.0
    }
    if msc_ver in runtimes:
        return runtimes[msc_ver]
    raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler(UnixCCompiler):
    """ Handles the Cygwin port of the GNU C compiler to Windows.
    """
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"
    def __init__(self, verbose=0, dry_run=0, force=0):
        # Probe the build environment (pyconfig.h support, gcc/ld/dllwrap
        # versions) and choose compiler/linker command lines accordingly.
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        status, details = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)
        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )
        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))
        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()
    def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
        """Compiles the source by spawning GCC and windres if needed."""
        if ext == '.rc' or ext == '.res':
            # gcc needs '.res' and '.rc' compiled to object files !!!
            try:
                self.spawn(["windres", "-i", src, "-o", obj])
            except DistutilsExecError as msg:
                raise CompileError(msg)
        else: # for other files use the C-compiler
            try:
                self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
                           extra_postargs)
            except DistutilsExecError as msg:
                raise CompileError(msg)
    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link the objects."""
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])
        # Additional libraries
        libraries.extend(self.dll_libraries)
        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)
            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))
            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)
            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)
        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KB < stripped_file < ??100KB
        #   unstripped_file = stripped_file + XXX KB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")
        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)
    # -- Miscellaneous methods -----------------------------------------
    def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
        """Adds supports for rc and res files."""
        if output_dir is None:
            output_dir = ''
        obj_names = []
        for src_name in source_filenames:
            # use normcase to make sure '.rc' is really '.rc' and not '.RC'
            base, ext = os.path.splitext(os.path.normcase(src_name))
            if ext not in (self.src_extensions + ['.rc','.res']):
                raise UnknownFileError("unknown file type '%s' (from '%s')" % \
                      (ext, src_name))
            if strip_dir:
                base = os.path.basename (base)
            if ext in ('.res', '.rc'):
                # these need to be compiled to object files
                obj_names.append (os.path.join(output_dir,
                                               base + ext + self.obj_extension))
            else:
                obj_names.append (os.path.join(output_dir,
                                               base + self.obj_extension))
        return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
""" Handles the Mingw32 port of the GNU C compiler to Windows.
"""
compiler_type = 'mingw32'
def __init__(self, verbose=0, dry_run=0, force=0):
CygwinCCompiler.__init__ (self, verbose, dry_run, force)
# ld_version >= "2.13" support -shared so use it instead of
# -mdll -static
if self.ld_version >= "2.13":
shared_option = "-shared"
else:
shared_option = "-mdll -static"
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if is_cygwingcc():
raise CCompilerError(
'Cygwin gcc cannot be used with --compiler=mingw32')
self.set_executables(compiler='gcc -O -Wall',
compiler_so='gcc -mdll -O -Wall',
compiler_cxx='g++ -O -Wall',
linker_exe='gcc',
linker_so='%s %s %s'
% (self.linker_dll, shared_option,
entry_point))
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
self.dll_libraries=[]
# Include the appropriate MSVC runtime library if Python was built
# with MSVC 7.0 or later.
self.dll_libraries = get_msvcr()
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using a unmodified
# version.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation appears amenable to building
    extensions with GCC.
    Returns a tuple (status, details), where 'status' is one of the following
    constants:
    - CONFIG_H_OK: all is well, go ahead and compile
    - CONFIG_H_NOTOK: doesn't look good
    - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h
    'details' is a human-readable string explaining the situation.
    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig
    # A GCC-built Python advertises it in sys.version, so the shipped
    # pyconfig.h can be trusted as-is.
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"
    # Otherwise look for __GNUC__ in the installed pyconfig.h; the context
    # manager replaces the original open/try/finally/close dance.
    fn = sysconfig.get_config_h_filename()
    try:
        with open(fn) as config_h:
            if "__GNUC__" in config_h.read():
                return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
            return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
# Raw bytes literal: the original plain b'(\d+...' relied on an invalid
# string escape ('\d'), a DeprecationWarning that became a SyntaxWarning
# in newer Pythons.  The compiled pattern is identical.
RE_VERSION = re.compile(rb'(\d+\.\d+(\.\d+)*)')
def _find_exe_version(cmd):
    """Find the version of an executable by running `cmd` in the shell.

    If the command is not found, or the output does not match
    `RE_VERSION`, returns None.
    """
    executable = cmd.split()[0]
    if find_executable(executable) is None:
        return None
    out = Popen(cmd, shell=True, stdout=PIPE).stdout
    try:
        out_string = out.read()
    finally:
        out.close()
    result = RE_VERSION.search(out_string)
    if result is None:
        return None
    # LooseVersion works with strings
    # so we need to decode our bytes
    return LooseVersion(result.group(1).decode())
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
    If not possible it returns None for it.
    """
    commands = ['gcc -dumpversion', 'ld -v', 'dllwrap --version']
    # map() applies the probe to each command; tuple() materialises it
    return tuple(map(_find_exe_version, commands))
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    # `gcc -dumpmachine` prints the target triplet, e.g.
    # "x86_64-pc-cygwin"; check_output returns bytes, hence b'cygwin'.
    out_string = check_output(['gcc', '-dumpmachine'])
    return out_string.strip().endswith(b'cygwin')
| lgpl-3.0 |
factorlibre/account-financial-tools | account_move_template/account_move_template.py | 29 | 4181 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 - 2014 Agile Business Group sagl
# (<http://www.agilebg.com>)
# Copyright (C) 2011 Domsense srl (<http://www.domsense.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.exceptions import ValidationError
class AccountMoveTemplate(models.Model):
    # Reusable template describing the structure of a journal entry
    # (account move); its lines live in account.move.template.line.
    _name = 'account.move.template'
    _inherit = 'account.document.template'
    @api.model
    def _company_get(self):
        # Default company: the current user's company for this model.
        return self.env['res.company']._company_default_get(
            object='account.move.template'
        )
    company_id = fields.Many2one(
        comodel_name='res.company',
        string='Company',
        required=True,
        change_default=True,
        default=_company_get,
    )
    template_line_ids = fields.One2many(
        comodel_name='account.move.template.line',
        inverse_name='template_id',
        string='Template Lines'
    )
    cross_journals = fields.Boolean(string='Cross-Journals')
    transitory_acc_id = fields.Many2one(
        comodel_name='account.account',
        string='Transitory account',
        required=False
    )
    @api.constrains('journal_id')
    def _check_different_journal(self):
        # Check that the journal on these lines are different/same in the case
        # of cross journals/single journal
        journal_ids = []
        all_journal_ids = []
        error_message = (
            u'If the template is "cross-journals", the Journals must be '
            u'different, if the template does not "cross-journals" the '
            u'Journals must be the same!'
        )
        for move_template in self:
            if move_template.template_line_ids:
                for template_line in move_template.template_line_ids:
                    all_journal_ids.append(template_line.journal_id.id)
                    if template_line.journal_id.id not in journal_ids:
                        journal_ids.append(template_line.journal_id.id)
                # cross-journal: every line must use a distinct journal,
                # so any duplicate (length mismatch) is an error
                if move_template.cross_journals:
                    if len(all_journal_ids) != len(journal_ids):
                        raise ValidationError(error_message)
                # single-journal: all lines must share exactly one journal
                elif len(journal_ids) != 1:
                    raise ValidationError(error_message)
class AccountMoveTemplateLine(models.Model):
    # One debit/credit line of an account.move.template.
    _name = 'account.move.template.line'
    _inherit = 'account.document.template.line'
    # Journal the generated move line will be posted to.
    journal_id = fields.Many2one(
        comodel_name='account.journal',
        string='Journal',
        required=True
    )
    account_id = fields.Many2one(
        comodel_name='account.account',
        string='Account',
        required=True,
        ondelete="cascade"
    )
    # Whether this template line produces a credit or a debit entry.
    move_line_type = fields.Selection(
        [('cr', 'Credit'), ('dr', 'Debit')],
        string='Move Line Type',
        required=True
    )
    analytic_account_id = fields.Many2one(
        comodel_name='account.analytic.account',
        string='Analytic Account',
        ondelete="cascade"
    )
    template_id = fields.Many2one(
        comodel_name='account.move.template',
        string='Template'
    )
    account_tax_id = fields.Many2one(
        comodel_name='account.tax',
        string='Tax'
    )
    # A template cannot hold two lines with the same sequence number.
    _sql_constraints = [
        ('sequence_template_uniq', 'unique (template_id,sequence)',
         'The sequence of the line must be unique per template !')
    ]
| agpl-3.0 |
Ziemin/telepathy-gabble | tests/twisted/text/destroy.py | 2 | 3534 | """
Test text channel not being recreated because although there were still
pending messages, we destroyed it with extreme prejudice.
"""
import dbus
from twisted.words.xish import domish
from gabbletest import exec_test
from servicetest import call_async, EventPattern, wrap_channel, assertEquals
import constants as cs
def test(q, bus, conn, stream):
    # Create a 1-1 text channel to foo@bar.com, exchange a message each
    # way, then Destroy() the channel while a message is still pending
    # and verify it does not get re-created.
    self_handle = conn.Properties.Get(cs.CONN, "SelfHandle")
    jid = 'foo@bar.com'
    foo_handle = conn.get_contact_handle_sync(jid)
    call_async(q, conn.Requests, 'CreateChannel', {
        cs.CHANNEL_TYPE: cs.CHANNEL_TYPE_TEXT,
        cs.TARGET_HANDLE_TYPE: cs.HT_CONTACT,
        cs.TARGET_HANDLE: foo_handle })
    ret, new_sig = q.expect_many(
        EventPattern('dbus-return', method='CreateChannel'),
        EventPattern('dbus-signal', signal='NewChannels'),
        )
    text_chan = wrap_channel(bus.get_object(conn.bus_name, ret.value[0]), 'Text')
    chan_iface = dbus.Interface(text_chan, cs.CHANNEL)
    destroyable_iface = dbus.Interface(text_chan, cs.CHANNEL_IFACE_DESTROYABLE)
    # NewChannels carries a single (object_path, properties) pair
    assert len(new_sig.args) == 1
    assert len(new_sig.args[0]) == 1 # one channel
    assert len(new_sig.args[0][0]) == 2 # two struct members
    assert new_sig.args[0][0][0] == ret.value[0]
    emitted_props = new_sig.args[0][0][1]
    assert emitted_props[cs.CHANNEL_TYPE] == cs.CHANNEL_TYPE_TEXT
    assert emitted_props[cs.TARGET_HANDLE_TYPE] == cs.HT_CONTACT
    assert emitted_props[cs.TARGET_HANDLE] == foo_handle
    assert emitted_props[cs.TARGET_ID] == jid
    assert emitted_props[cs.REQUESTED] == True
    assert emitted_props[cs.INITIATOR_HANDLE] == self_handle
    assert emitted_props[cs.INITIATOR_ID] == 'test@localhost'
    channel_props = text_chan.GetAll(
        cs.CHANNEL, dbus_interface=dbus.PROPERTIES_IFACE)
    assert channel_props['TargetID'] == jid, (channel_props['TargetID'], jid)
    assert channel_props['Requested'] == True
    assert channel_props['InitiatorHandle'] == self_handle,\
        (channel_props['InitiatorHandle'], self_handle)
    assert channel_props['InitiatorID'] == 'test@localhost',\
        channel_props['InitiatorID']
    # outgoing message must appear on the wire as <message type="chat">
    text_chan.send_msg_sync('hey')
    event = q.expect('stream-message')
    elem = event.stanza
    assert elem.name == 'message'
    assert elem['type'] == 'chat'
    body = list(event.stanza.elements())[0]
    assert body.name == 'body'
    assert body.children[0] == u'hey'
    # simulate an incoming reply, left unacknowledged on purpose:
    # <message type="chat"><body>hello</body</message>
    m = domish.Element((None, 'message'))
    m['from'] = 'foo@bar.com/Pidgin'
    m['type'] = 'chat'
    m.addElement('body', content='hello')
    stream.send(m)
    event = q.expect('dbus-signal', signal='MessageReceived')
    msg = event.args[0]
    assertEquals(foo_handle, msg[0]['message-sender'])
    assertEquals('hello', msg[1]['content'])
    messages = text_chan.Properties.Get(cs.CHANNEL_IFACE_MESSAGES, 'PendingMessages')
    assertEquals([msg], messages)
    # destroy the channel without acking the message; it does not come back
    call_async(q, destroyable_iface, 'Destroy')
    event = q.expect('dbus-signal', signal='Closed')
    assert event.path == text_chan.object_path,\
        (event.path, text_chan.object_path)
    event = q.expect('dbus-return', method='Destroy')
    # assert that it stays dead
    try:
        chan_iface.GetChannelType()
    except dbus.DBusException:
        pass
    else:
        raise AssertionError("Why won't it die?")
# Allow running this test file directly.
if __name__ == '__main__':
    exec_test(test)
| lgpl-2.1 |
pombredanne/grumpy | grumpy-runtime-src/third_party/pypy/datetime.py | 6 | 74032 | """Concrete date/time and related types -- prototype implemented in Python.
See http://www.zope.org/Members/fdrake/DateTimeWiki/FrontPage
See also http://dir.yahoo.com/Reference/calendars/
For a primer on DST, including many current DST rules, see
http://webexhibits.org/daylightsaving/
For more about DST than you ever wanted to know, see
ftp://elsie.nci.nih.gov/pub/
Sources for time zone and DST data: http://www.twinsun.com/tz/tz-link.htm
This was originally copied from the sandbox of the CPython CVS repository.
Thanks to Tim Peters for suggesting using it.
"""
# from __future__ import division
import time as _time
import math as _math
# import struct as _struct
import _struct
def divmod(x, y):
    """Floor division and remainder for integer operands.

    Deliberately shadows the builtin (legacy Python-2 port).  Uses //
    so the quotient stays an int under Python 3 as well; the original
    `x / y` relied on Python 2 integer division and would yield a float
    quotient on Python 3.
    """
    x, y = int(x), int(y)
    return x // y, x % y
# Unique default-argument marker (distinguishes "not passed" from None).
_SENTINEL = object()
def _cmp(x, y):
    # Three-way compare: 0 if equal, 1 if x > y, -1 if x < y
    # (stand-in for Python 2's cmp()).
    return 0 if x == y else 1 if x > y else -1
def _round(x):
    # Round half away from zero (Python 2 rounding), returning an int.
    return int(_math.floor(x + 0.5) if x >= 0.0 else _math.ceil(x - 0.5))
MINYEAR = 1
MAXYEAR = 9999
_MINYEARFMT = 1900
_MAX_DELTA_DAYS = 999999999
# Utility functions, adapted from Python's Demo/classes/Dates.py, which
# also assumes the current Gregorian calendar indefinitely extended in
# both directions. Difference: Dates.py calls January 1 of year 0 day
# number 1. The code here calls January 1 of year 1 day number 1. This is
# to match the definition of the "proleptic Gregorian" calendar in Dershowitz
# and Reingold's "Calendrical Calculations", where it's the base calendar
# for all computations. See the book for algorithms for converting between
# proleptic Gregorian ordinals and many other calendar systems.
# The -1 placeholder lets month numbers 1..12 index the tables directly.
_DAYS_IN_MONTH = [-1, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
_DAYS_BEFORE_MONTH = [-1]
dbm = 0
# Accumulate days-before-month totals for a non-leap year.
for dim in _DAYS_IN_MONTH[1:]:
    _DAYS_BEFORE_MONTH.append(dbm)
    dbm += dim
del dbm, dim
def _is_leap(year):
    "year -> 1 if leap year, else 0."
    # Gregorian rule, phrased via guard clauses: divisible by 4, unless
    # it is a century year not divisible by 400.
    if year % 4 != 0:
        return False
    return year % 100 != 0 or year % 400 == 0
def _days_before_year(year):
"year -> number of days before January 1st of year."
y = year - 1
return y*365 + y//4 - y//100 + y//400
def _days_in_month(year, month):
"year, month -> number of days in that month in that year."
assert 1 <= month <= 12, month
if month == 2 and _is_leap(year):
return 29
return _DAYS_IN_MONTH[month]
def _days_before_month(year, month):
    "year, month -> number of days in year preceding first day of month."
    assert 1 <= month <= 12, 'month must be in 1..12'
    days = _DAYS_BEFORE_MONTH[month]
    # Past February, a leap year contributes one extra day.
    if month > 2 and _is_leap(year):
        days += 1
    return days
def _ymd2ord(year, month, day):
    "year, month, day -> ordinal, considering 01-Jan-0001 as day 1."
    assert 1 <= month <= 12, 'month must be in 1..12'
    dim = _days_in_month(year, month)
    assert 1 <= day <= dim, ('day must be in 1..%d' % dim)
    # Whole years before this one, whole months before this one, then days.
    days_so_far = _days_before_year(year)
    days_so_far += _days_before_month(year, month)
    return days_so_far + day
# Cycle lengths used by _ord2ymd's divide-and-conquer date recovery.
_DI400Y = _days_before_year(401)    # number of days in 400 years
_DI100Y = _days_before_year(101)    # "    "   "   " 100   "
_DI4Y   = _days_before_year(5)      # "    "   "   "   4   "
# A 4-year cycle has an extra leap day over what we'd get from pasting
# together 4 single years.
assert _DI4Y == 4 * 365 + 1
# Similarly, a 400-year cycle has an extra leap day over what we'd get from
# pasting together 4 100-year cycles.
assert _DI400Y == 4 * _DI100Y + 1
# OTOH, a 100-year cycle has one fewer leap day than we'd get from
# pasting together 25 4-year cycles.
assert _DI100Y == 25 * _DI4Y - 1
# Microsecond conversion factors, one per timedelta constructor unit.
_US_PER_US = 1
_US_PER_MS = 1000
_US_PER_SECOND = 1000000
_US_PER_MINUTE = 60000000
_SECONDS_PER_DAY = 24 * 3600
_US_PER_HOUR = 3600000000
_US_PER_DAY = 86400000000
_US_PER_WEEK = 604800000000
def _ord2ymd(n):
    "ordinal -> (year, month, day), considering 01-Jan-0001 as day 1."
    # n is a 1-based index, starting at 1-Jan-1.  The pattern of leap years
    # repeats exactly every 400 years.  The basic strategy is to find the
    # closest 400-year boundary at or before n, then work with the offset
    # from that boundary to n.  Life is much clearer if we subtract 1 from
    # n first -- then the values of n at 400-year boundaries are exactly
    # those divisible by _DI400Y:
    #
    #     D  M   Y            n              n-1
    #     -- --- ----        ----------     ----------------
    #     31 Dec -400        -_DI400Y       -_DI400Y -1
    #      1 Jan -399         -_DI400Y +1   -_DI400Y      400-year boundary
    #     ...
    #     30 Dec  000        -1             -2
    #     31 Dec  000         0             -1
    #      1 Jan  001         1              0            400-year boundary
    #      2 Jan  001         2              1
    #      3 Jan  001         3              2
    #     ...
    #     31 Dec  400         _DI400Y        _DI400Y -1
    #      1 Jan  401         _DI400Y +1     _DI400Y      400-year boundary
    n -= 1
    n400, n = divmod(n, _DI400Y)
    year = n400 * 400 + 1   # ..., -399, 1, 401, ...
    # Now n is the (non-negative) offset, in days, from January 1 of year, to
    # the desired date.  Now compute how many 100-year cycles precede n.
    # Note that it's possible for n100 to equal 4!  In that case 4 full
    # 100-year cycles precede the desired day, which implies the desired
    # day is December 31 at the end of a 400-year cycle.
    n100, n = divmod(n, _DI100Y)
    # Now compute how many 4-year cycles precede it.
    n4, n = divmod(n, _DI4Y)
    # And now how many single years.  Again n1 can be 4, and again meaning
    # that the desired day is December 31 at the end of the 4-year cycle.
    n1, n = divmod(n, 365)
    year += n100 * 100 + n4 * 4 + n1
    if n1 == 4 or n100 == 4:
        # Dec 31 at the end of a 4-year or 400-year cycle (see above).
        assert n == 0
        return year-1, 12, 31
    # Now the year is correct, and n is the offset from January 1.  We find
    # the month via an estimate that's either exact or one too large.
    leapyear = n1 == 3 and (n4 != 24 or n100 == 3)
    assert leapyear == _is_leap(year)
    month = (n + 50) >> 5
    preceding = _DAYS_BEFORE_MONTH[month] + (month > 2 and leapyear)
    if preceding > n:  # estimate is too large
        month -= 1
        preceding -= _DAYS_IN_MONTH[month] + (month == 2 and leapyear)
    n -= preceding
    assert 0 <= n < _days_in_month(year, month)
    # Now the year and month are correct, and n is the offset from the
    # start of that month:  we're done!
    return year, month, n+1
# Month and day names. For localized versions, see the calendar module.
# Abbreviated names, padded with None at index 0 so 1-based month/weekday
# numbers index directly.
_MONTHNAMES = [None, "Jan", "Feb", "Mar", "Apr", "May", "Jun",
               "Jul", "Aug", "Sep", "Oct", "Nov", "Dec"]
_DAYNAMES = [None, "Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
def _build_struct_time(y, m, d, hh, mm, ss, dstflag):
    # Assemble a time.struct_time from broken-out fields.
    # Ordinal 1 (1-Jan-0001) was a Monday; struct_time wants Monday == 0.
    wday = (_ymd2ord(y, m, d) + 6) % 7
    # 1-based day-of-year.
    dnum = _days_before_month(y, m) + d
    return _time.struct_time((y, m, d, hh, mm, ss, wday, dnum, dstflag))
def _format_time(hh, mm, ss, us):
# Skip trailing microseconds when us==0.
result = "%02d:%02d:%02d" % (hh, mm, ss)
if us:
result += ".%06d" % us
return result
# Correctly substitute for %z and %Z escapes in strftime formats.
# def _wrap_strftime(object, format, timetuple):
# year = timetuple[0]
# if year < _MINYEARFMT:
# raise ValueError("year=%d is before %d; the datetime strftime() "
# "methods require year >= %d" %
# (year, _MINYEARFMT, _MINYEARFMT))
# # Don't call utcoffset() or tzname() unless actually needed.
# freplace = None # the string to use for %f
# zreplace = None # the string to use for %z
# Zreplace = None # the string to use for %Z
# # Scan format for %z and %Z escapes, replacing as needed.
# newformat = []
# push = newformat.append
# i, n = 0, len(format)
# while i < n:
# ch = format[i]
# i += 1
# if ch == '%':
# if i < n:
# ch = format[i]
# i += 1
# if ch == 'f':
# if freplace is None:
# freplace = '%06d' % getattr(object,
# 'microsecond', 0)
# newformat.append(freplace)
# elif ch == 'z':
# if zreplace is None:
# zreplace = ""
# if hasattr(object, "_utcoffset"):
# offset = object._utcoffset()
# if offset is not None:
# sign = '+'
# if offset < 0:
# offset = -offset
# sign = '-'
# h, m = divmod(offset, 60)
# zreplace = '%c%02d%02d' % (sign, h, m)
# assert '%' not in zreplace
# newformat.append(zreplace)
# elif ch == 'Z':
# if Zreplace is None:
# Zreplace = ""
# if hasattr(object, "tzname"):
# s = object.tzname()
# if s is not None:
# # strftime is going to have at this: escape %
# Zreplace = s.replace('%', '%%')
# newformat.append(Zreplace)
# else:
# push('%')
# push(ch)
# else:
# push('%')
# else:
# push(ch)
# newformat = "".join(newformat)
# return _time.strftime(newformat, timetuple)
# Just raise TypeError if the arg isn't None or a string.
def _check_tzname(name):
if name is not None and not isinstance(name, str):
raise TypeError("tzinfo.tzname() must return None or string, "
"not '%s'" % type(name))
# name is the offset-producing method, "utcoffset" or "dst".
# offset is what it returned.
# If offset isn't None or timedelta, raises TypeError.
# If offset is None, returns None.
# Else offset is checked for being in range, and a whole # of minutes.
# If it is, its integer value is returned. Else ValueError is raised.
def _check_utc_offset(name, offset):
    """Validate the result of tzinfo.utcoffset() or tzinfo.dst().

    name is "utcoffset" or "dst"; offset is what that method returned.
    None passes through unchanged.  A timedelta must be a whole number of
    minutes with magnitude strictly less than one day; it is returned as
    an int count of minutes.  Anything else raises TypeError/ValueError.
    """
    assert name in ("utcoffset", "dst")
    if offset is None:
        return
    if not isinstance(offset, timedelta):
        raise TypeError("tzinfo.%s() must return None "
                        "or timedelta, not '%s'" % (name, type(offset)))
    days = offset.days
    # timedelta normalizes so 0 <= seconds < 86400; days outside {-1, 0}
    # therefore means |offset| >= 24 hours.
    if days < -1 or days > 0:
        offset = 1440  # trigger out-of-range
    else:
        seconds = days * 86400 + offset.seconds
        minutes, seconds = divmod(seconds, 60)
        if seconds or offset.microseconds:
            raise ValueError("tzinfo.%s() must return a whole number "
                             "of minutes" % name)
        offset = minutes
    if not -1440 < offset < 1440:
        raise ValueError("%s()=%d, must be in -1439..1439" % (name, offset))
    return offset
def _check_int_field(value):
    # Coerce value to a plain int.  Accepts ints directly, or any
    # non-float object whose __int__ returns an int (or a Py2 long);
    # floats are rejected outright so fractional fields cannot slip in.
    if isinstance(value, int):
        return int(value)
    if not isinstance(value, float):
        try:
            value = value.__int__()
        except AttributeError:
            pass
        else:
            if isinstance(value, int):
                return int(value)
            elif isinstance(value, long):  # NOTE: 'long' exists on Python 2 only
                return int(long(value))
            raise TypeError('__int__ method should return an integer')
        raise TypeError('an integer is required')
    raise TypeError('integer argument expected, got float')
def _check_date_fields(year, month, day):
    """Coerce year/month/day to ints and range-check them.

    Returns the validated (year, month, day) triple; raises TypeError for
    non-integers and ValueError for out-of-range values.
    """
    year = _check_int_field(year)
    month = _check_int_field(month)
    day = _check_int_field(day)
    if not MINYEAR <= year <= MAXYEAR:
        raise ValueError('year must be in %d..%d' % (MINYEAR, MAXYEAR), year)
    if not 1 <= month <= 12:
        raise ValueError('month must be in 1..12', month)
    # Day range depends on the (validated) year and month.
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        raise ValueError('day must be in 1..%d' % dim, day)
    return year, month, day
def _check_time_fields(hour, minute, second, microsecond):
    """Coerce time fields to ints and range-check them.

    Returns the validated (hour, minute, second, microsecond) tuple;
    raises TypeError for non-integers and ValueError for out-of-range.
    """
    hour = _check_int_field(hour)
    minute = _check_int_field(minute)
    second = _check_int_field(second)
    microsecond = _check_int_field(microsecond)
    if not 0 <= hour <= 23:
        raise ValueError('hour must be in 0..23', hour)
    if not 0 <= minute <= 59:
        raise ValueError('minute must be in 0..59', minute)
    if not 0 <= second <= 59:
        raise ValueError('second must be in 0..59', second)
    if not 0 <= microsecond <= 999999:
        raise ValueError('microsecond must be in 0..999999', microsecond)
    return hour, minute, second, microsecond
def _check_tzinfo_arg(tz):
    """Raise TypeError unless tz is None or a tzinfo instance."""
    if tz is None or isinstance(tz, tzinfo):
        return
    raise TypeError("tzinfo argument must be None or of a tzinfo subclass")
# Notes on comparison: In general, datetime module comparison operators raise
# TypeError when they don't know how to do a comparison themselves.  If they
# returned NotImplemented instead, comparison could (silently) fall back to
# the default compare-objects-by-comparing-their-memory-addresses strategy,
# and that's not helpful. There are two exceptions:
#
# 1. For date and datetime, if the other object has a "timetuple" attr,
# NotImplemented is returned. This is a hook to allow other kinds of
# datetime-like objects a chance to intercept the comparison.
#
# 2. Else __eq__ and __ne__ return False and True, respectively. This is
#    so operations like
#
# x == y
# x != y
# x in sequence
# x not in sequence
# dict[x] = y
#
# don't raise annoying TypeErrors just because a datetime object
# is part of a heterogeneous collection. If there's no known way to
# compare X to a datetime, saying they're not equal is reasonable.
def _cmperror(x, y):
raise TypeError("can't compare '%s' to '%s'" % (
type(x).__name__, type(y).__name__))
def _normalize_pair(hi, lo, factor):
if not 0 <= lo <= factor-1:
inc, lo = divmod(lo, factor)
hi += inc
return hi, lo
def _normalize_datetime(y, m, d, hh, mm, ss, us, ignore_overflow=False):
    # Normalize all the inputs, and store the normalized values.
    # Carries propagate upward: us -> ss -> mm -> hh -> d, and finally the
    # date normalizer fixes y/m/d (order matters here).
    ss, us = _normalize_pair(ss, us, 1000000)
    mm, ss = _normalize_pair(mm, ss, 60)
    hh, mm = _normalize_pair(hh, mm, 60)
    d, hh = _normalize_pair(d, hh, 24)
    y, m, d = _normalize_date(y, m, d, ignore_overflow)
    return y, m, d, hh, mm, ss, us
def _normalize_date(year, month, day, ignore_overflow=False):
    """Bring (year, month, day) into valid calendar range.

    Raises OverflowError when the normalized year falls outside
    MINYEAR..MAXYEAR, unless ignore_overflow is true.
    """
    # That was easy.  Now it gets muddy:  the proper range for day
    # can't be determined without knowing the correct month and year,
    # but if day is, e.g., plus or minus a million, the current month
    # and year values make no sense (and may also be out of bounds
    # themselves).
    # Saying 12 months == 1 year should be non-controversial.
    if not 1 <= month <= 12:
        year, month = _normalize_pair(year, month-1, 12)
        month += 1
        assert 1 <= month <= 12
    # Now only day can be out of bounds (year may also be out of bounds
    # for a datetime object, but we don't care about that here).
    # If day is out of bounds, what to do is arguable, but at least the
    # method here is principled and explainable.
    dim = _days_in_month(year, month)
    if not 1 <= day <= dim:
        # Move day-1 days from the first of the month.  First try to
        # get off cheap if we're only one day out of range (adjustments
        # for timezone alone can't be worse than that).
        if day == 0:    # move back a day
            month -= 1
            if month > 0:
                day = _days_in_month(year, month)
            else:
                year, month, day = year-1, 12, 31
        elif day == dim + 1:    # move forward a day
            month += 1
            day = 1
            if month > 12:
                month = 1
                year += 1
        else:
            # General case: convert to an ordinal and back.
            ordinal = _ymd2ord(year, month, 1) + (day - 1)
            year, month, day = _ord2ymd(ordinal)
    if not ignore_overflow and not MINYEAR <= year <= MAXYEAR:
        raise OverflowError("date value out of range")
    return year, month, day
def _accum(tag, sofar, num, factor, leftover):
    # Accumulate one timedelta constructor argument:
    # returns (sofar + num*factor, leftover), where the fractional part of
    # any float contribution is carried in 'leftover' (in units of the
    # final result) so rounding can happen exactly once at the end.
    # 'tag' is used only in the error message.
    if isinstance(num, (int, long)):  # NOTE: 'long' exists on Python 2 only
        prod = num * factor
        rsum = sofar + prod
        return rsum, leftover
    if isinstance(num, float):
        # Split num so the integral part is accumulated exactly and only
        # the fraction goes through float arithmetic.
        fracpart, intpart = _math.modf(num)
        prod = int(intpart) * factor
        rsum = sofar + prod
        if fracpart == 0.0:
            return rsum, leftover
        assert isinstance(factor, (int, long))
        fracpart, intpart = _math.modf(factor * fracpart)
        rsum += int(intpart)
        return rsum, leftover + fracpart
    raise TypeError("unsupported type for timedelta %s component: %s" %
                    (tag, type(num)))
class timedelta(object):
    """Represent the difference between two datetime objects.
    Supported operators:
    - add, subtract timedelta
    - unary plus, minus, abs
    - compare to timedelta
    - multiply, divide by int/long
    In addition, datetime supports subtraction of two datetime objects
    returning a timedelta, and addition or subtraction of a datetime
    and a timedelta giving a datetime.
    Representation: (days, seconds, microseconds).  Why?  Because I
    felt like it.
    """
    # _hashcode caches hash(); -1 means "not computed yet".
    __slots__ = '_days', '_seconds', '_microseconds', '_hashcode'
    def __new__(cls, days=_SENTINEL, seconds=_SENTINEL, microseconds=_SENTINEL,
                milliseconds=_SENTINEL, minutes=_SENTINEL, hours=_SENTINEL, weeks=_SENTINEL):
        # Fold every supplied argument into a single microsecond count x;
        # float fractions accumulate in 'leftover' and are rounded once at
        # the end to avoid compounding rounding error.
        x = 0
        leftover = 0.0
        if microseconds is not _SENTINEL:
            x, leftover = _accum("microseconds", x, microseconds, _US_PER_US, leftover)
        if milliseconds is not _SENTINEL:
            x, leftover = _accum("milliseconds", x, milliseconds, _US_PER_MS, leftover)
        if seconds is not _SENTINEL:
            x, leftover = _accum("seconds", x, seconds, _US_PER_SECOND, leftover)
        if minutes is not _SENTINEL:
            x, leftover = _accum("minutes", x, minutes, _US_PER_MINUTE, leftover)
        if hours is not _SENTINEL:
            x, leftover = _accum("hours", x, hours, _US_PER_HOUR, leftover)
        if days is not _SENTINEL:
            x, leftover = _accum("days", x, days, _US_PER_DAY, leftover)
        if weeks is not _SENTINEL:
            x, leftover = _accum("weeks", x, weeks, _US_PER_WEEK, leftover)
        if leftover != 0.0:
            x += _round(leftover)
        return cls._from_microseconds(x)
    @classmethod
    def _from_microseconds(cls, us):
        # Split a raw microsecond count into (days, seconds, microseconds).
        s, us = divmod(us, _US_PER_SECOND)
        d, s = divmod(s, _SECONDS_PER_DAY)
        return cls._create(d, s, us, False)
    @classmethod
    def _create(cls, d, s, us, normalize):
        # Internal constructor; when 'normalize' is true, overflow is
        # carried from us -> s -> d before the range check.
        if normalize:
            s, us = _normalize_pair(s, us, 1000000)
            d, s = _normalize_pair(d, s, 24*3600)
        if not -_MAX_DELTA_DAYS <= d <= _MAX_DELTA_DAYS:
            raise OverflowError("days=%d; must have magnitude <= %d" % (d, _MAX_DELTA_DAYS))
        self = object.__new__(cls)
        self._days = d
        self._seconds = s
        self._microseconds = us
        self._hashcode = -1
        return self
    def _to_microseconds(self):
        # Inverse of _from_microseconds: collapse back to one integer.
        return ((self._days * _SECONDS_PER_DAY + self._seconds) * _US_PER_SECOND +
                self._microseconds)
    def __repr__(self):
        # Only qualify with "datetime." for the real class, not subclasses.
        module = "datetime." if self.__class__ is timedelta else ""
        if self._microseconds:
            return "%s(%d, %d, %d)" % (module + self.__class__.__name__,
                                       self._days,
                                       self._seconds,
                                       self._microseconds)
        if self._seconds:
            return "%s(%d, %d)" % (module + self.__class__.__name__,
                                   self._days,
                                   self._seconds)
        return "%s(%d)" % (module + self.__class__.__name__, self._days)
    def __str__(self):
        # "[D day[s], ]H:MM:SS[.ffffff]"
        mm, ss = divmod(self._seconds, 60)
        hh, mm = divmod(mm, 60)
        s = "%d:%02d:%02d" % (hh, mm, ss)
        if self._days:
            def plural(n):
                return n, abs(n) != 1 and "s" or ""
            s = ("%d day%s, " % plural(self._days)) + s
        if self._microseconds:
            s = s + ".%06d" % self._microseconds
        return s
    def total_seconds(self):
        """Total seconds in the duration."""
        # return self._to_microseconds() / 10**6
        return float(self._to_microseconds()) / float(10**6)
    # Read-only field accessors
    @property
    def days(self):
        """days"""
        return self._days
    @property
    def seconds(self):
        """seconds"""
        return self._seconds
    @property
    def microseconds(self):
        """microseconds"""
        return self._microseconds
    def __add__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta._create(self._days + other._days,
                                     self._seconds + other._seconds,
                                     self._microseconds + other._microseconds,
                                     True)
        return NotImplemented
    def __sub__(self, other):
        if isinstance(other, timedelta):
            # for CPython compatibility, we cannot use
            # our __class__ here, but need a real timedelta
            return timedelta._create(self._days - other._days,
                                     self._seconds - other._seconds,
                                     self._microseconds - other._microseconds,
                                     True)
        return NotImplemented
    def __neg__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta._create(-self._days,
                                 -self._seconds,
                                 -self._microseconds,
                                 True)
    def __pos__(self):
        # for CPython compatibility, we cannot use
        # our __class__ here, but need a real timedelta
        return timedelta._create(self._days,
                                 self._seconds,
                                 self._microseconds,
                                 False)
    def __abs__(self):
        # Normalization keeps seconds/microseconds non-negative, so the
        # sign of the whole value lives in _days.
        if self._days < 0:
            return -self
        else:
            return self
    def __mul__(self, other):
        if not isinstance(other, (int, long)):
            return NotImplemented
        usec = self._to_microseconds()
        return timedelta._from_microseconds(usec * other)
    __rmul__ = __mul__
    def __div__(self, other):
        # Python 2 division hook; floor-divides via int/int division.
        if not isinstance(other, (int, long)):
            return NotImplemented
        usec = self._to_microseconds()
        # return timedelta._from_microseconds(usec // other)
        return timedelta._from_microseconds(int(usec) / int(other))
    __floordiv__ = __div__
    # Comparisons of timedelta objects with other.
    def __eq__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) == 0
        else:
            return False
    def __ne__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) != 0
        else:
            return True
    def __le__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)
    def __lt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)
    def __ge__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)
    def __gt__(self, other):
        if isinstance(other, timedelta):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)
    def _cmp(self, other):
        # Normalized (days, seconds, microseconds) tuples compare correctly
        # field by field.
        assert isinstance(other, timedelta)
        return _cmp(self._getstate(), other._getstate())
    def __hash__(self):
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode
    def __nonzero__(self):
        # Python 2 truth-value hook: false only for a zero duration.
        return (self._days != 0 or
                self._seconds != 0 or
                self._microseconds != 0)
    # Pickle support.
    def _getstate(self):
        return (self._days, self._seconds, self._microseconds)
    def __reduce__(self):
        return (self.__class__, self._getstate())
# Class attributes: extreme representable values and smallest difference.
timedelta.min = timedelta(-_MAX_DELTA_DAYS)
timedelta.max = timedelta(_MAX_DELTA_DAYS, 24*3600-1, 1000000-1)
timedelta.resolution = timedelta(microseconds=1)
class date(object):
    """Concrete date type.
    Constructors:
    __new__()
    fromtimestamp()
    today()
    fromordinal()
    Operators:
    __repr__, __str__
    __cmp__, __hash__
    __add__, __radd__, __sub__ (add/radd only with timedelta arg)
    Methods:
    timetuple()
    toordinal()
    weekday()
    isoweekday(), isocalendar(), isoformat()
    ctime()
    strftime()
    Properties (readonly):
    year, month, day
    """
    # _hashcode caches hash(); -1 means "not computed yet".
    __slots__ = '_year', '_month', '_day', '_hashcode'
    def __new__(cls, year, month=None, day=None):
        """Constructor.
        Arguments:
        year, month, day (required, base 1)
        """
        # if month is None and isinstance(year, bytes) and len(year) == 4 and \
        #         1 <= ord(year[2]) <= 12:
        #     # Pickle support
        #     self = object.__new__(cls)
        #     self.__setstate(year)
        #     self._hashcode = -1
        #     return self
        # Validate and coerce all fields before building the instance.
        year, month, day = _check_date_fields(year, month, day)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hashcode = -1
        return self
    # Additional constructors
    @classmethod
    def fromtimestamp(cls, t):
        "Construct a date from a POSIX timestamp (like time.time())."
        y, m, d, hh, mm, ss, weekday, jday, dst = _time.localtime(t)
        return cls(y, m, d)
    @classmethod
    def today(cls):
        "Construct a date from time.time()."
        t = _time.time()
        return cls.fromtimestamp(t)
    @classmethod
    def fromordinal(cls, n):
        """Construct a date from a proleptic Gregorian ordinal.
        January 1 of year 1 is day 1.  Only the year, month and day are
        non-zero in the result.
        """
        y, m, d = _ord2ymd(n)
        return cls(y, m, d)
    # Conversions to string
    def __repr__(self):
        """Convert to formal string, for repr().
        >>> dt = datetime(2010, 1, 1)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0)'
        >>> dt = datetime(2010, 1, 1, tzinfo=timezone.utc)
        >>> repr(dt)
        'datetime.datetime(2010, 1, 1, 0, 0, tzinfo=datetime.timezone.utc)'
        """
        # Only qualify with "datetime." for the real class, not subclasses.
        module = "datetime." if self.__class__ is date else ""
        return "%s(%d, %d, %d)" % (module + self.__class__.__name__,
                                   self._year,
                                   self._month,
                                   self._day)
    # XXX These shouldn't depend on time.localtime(), because that
    # clips the usable dates to [1970 .. 2038).  At least ctime() is
    # easily done without using strftime() -- that's better too because
    # strftime("%c", ...) is locale specific.
    def ctime(self):
        "Return ctime() style string."
        # toordinal() % 7 maps Monday-based ordinals onto _DAYNAMES' 1..7.
        weekday = self.toordinal() % 7 or 7
        return "%s %s %2d 00:00:00 %04d" % (
            _DAYNAMES[weekday],
            _MONTHNAMES[self._month],
            self._day, self._year)
    # def strftime(self, format):
    #     "Format using strftime()."
    #     return _wrap_strftime(self, format, self.timetuple())
    def __format__(self, fmt):
        # NOTE: 'unicode' exists on Python 2 only.
        if not isinstance(fmt, (str, unicode)):
            raise ValueError("__format__ expects str or unicode, not %s" %
                             fmt.__class__.__name__)
        if len(fmt) != 0:
            return self.strftime(fmt)
        return str(self)
    def isoformat(self):
        """Return the date formatted according to ISO.
        This is 'YYYY-MM-DD'.
        References:
        - http://www.w3.org/TR/NOTE-datetime
        - http://www.cl.cam.ac.uk/~mgk25/iso-time.html
        """
        # return "%04d-%02d-%02d" % (self._year, self._month, self._day)
        return "%s-%s-%s" % (str(self._year).zfill(4), str(self._month).zfill(2), str(self._day).zfill(2))
    __str__ = isoformat
    # Read-only field accessors
    @property
    def year(self):
        """year (1-9999)"""
        return self._year
    @property
    def month(self):
        """month (1-12)"""
        return self._month
    @property
    def day(self):
        """day (1-31)"""
        return self._day
    # Standard conversions, __cmp__, __hash__ (and helpers)
    def timetuple(self):
        "Return local time tuple compatible with time.localtime()."
        return _build_struct_time(self._year, self._month, self._day,
                                  0, 0, 0, -1)
    def toordinal(self):
        """Return proleptic Gregorian ordinal for the year, month and day.
        January 1 of year 1 is day 1.  Only the year, month and day values
        contribute to the result.
        """
        return _ymd2ord(self._year, self._month, self._day)
    def replace(self, year=None, month=None, day=None):
        """Return a new date with new values for the specified fields."""
        if year is None:
            year = self._year
        if month is None:
            month = self._month
        if day is None:
            day = self._day
        return date.__new__(type(self), year, month, day)
    # Comparisons of date objects with other.
    # Objects exposing a "timetuple" attribute get NotImplemented so
    # other datetime-like types can intercept the comparison.
    def __eq__(self, other):
        if isinstance(other, date):
            return self._cmp(other) == 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            return False
    def __ne__(self, other):
        if isinstance(other, date):
            return self._cmp(other) != 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            return True
    def __le__(self, other):
        if isinstance(other, date):
            return self._cmp(other) <= 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)
    def __lt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) < 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)
    def __ge__(self, other):
        if isinstance(other, date):
            return self._cmp(other) >= 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)
    def __gt__(self, other):
        if isinstance(other, date):
            return self._cmp(other) > 0
        elif hasattr(other, "timetuple"):
            return NotImplemented
        else:
            _cmperror(self, other)
    def _cmp(self, other):
        # (year, month, day) tuples order the same way dates do.
        assert isinstance(other, date)
        y, m, d = self._year, self._month, self._day
        y2, m2, d2 = other._year, other._month, other._day
        return _cmp((y, m, d), (y2, m2, d2))
    def __hash__(self):
        "Hash."
        if self._hashcode == -1:
            self._hashcode = hash(self._getstate())
        return self._hashcode
    # Computations
    def _add_timedelta(self, other, factor):
        # Shift by factor * other.days; _normalize_date absorbs the
        # out-of-range day value.
        y, m, d = _normalize_date(
            self._year,
            self._month,
            self._day + other.days * factor)
        return date(y, m, d)
    def __add__(self, other):
        "Add a date to a timedelta."
        if isinstance(other, timedelta):
            return self._add_timedelta(other, 1)
        return NotImplemented
    __radd__ = __add__
    def __sub__(self, other):
        """Subtract two dates, or a date and a timedelta."""
        if isinstance(other, date):
            days1 = self.toordinal()
            days2 = other.toordinal()
            return timedelta._create(days1 - days2, 0, 0, False)
        if isinstance(other, timedelta):
            return self._add_timedelta(other, -1)
        return NotImplemented
    def weekday(self):
        "Return day of the week, where Monday == 0 ... Sunday == 6."
        return (self.toordinal() + 6) % 7
    # Day-of-the-week and week-of-the-year, according to ISO
    def isoweekday(self):
        "Return day of the week, where Monday == 1 ... Sunday == 7."
        # 1-Jan-0001 is a Monday
        return self.toordinal() % 7 or 7
    def isocalendar(self):
        """Return a 3-tuple containing ISO year, week number, and weekday.
        The first ISO week of the year is the (Mon-Sun) week
        containing the year's first Thursday; everything else derives
        from that.
        The first week is 1; Monday is 1 ... Sunday is 7.
        ISO calendar algorithm taken from
        http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        """
        year = self._year
        week1monday = _isoweek1monday(year)
        today = _ymd2ord(self._year, self._month, self._day)
        # Internally, week and day have origin 0
        week, day = divmod(today - week1monday, 7)
        if week < 0:
            # Date falls in the last ISO week of the previous year.
            year -= 1
            week1monday = _isoweek1monday(year)
            week, day = divmod(today - week1monday, 7)
        elif week >= 52:
            # Date may already belong to week 1 of the next ISO year.
            if today >= _isoweek1monday(year+1):
                year += 1
                week = 0
        return year, week+1, day+1
    # Pickle support.
    def _getstate(self):
        # Four bytes: year hi, year lo, month, day.
        yhi, ylo = divmod(self._year, 256)
        return (_struct.pack('4B', yhi, ylo, self._month, self._day),)
    def __setstate(self, string):
        yhi, ylo, self._month, self._day = (ord(string[0]), ord(string[1]),
                                            ord(string[2]), ord(string[3]))
        self._year = yhi * 256 + ylo
    def __reduce__(self):
        return (self.__class__, self._getstate())
_date_class = date # so functions w/ args named "date" can get at the class
# Class attributes: extreme representable dates and smallest difference.
date.min = date(1, 1, 1)
date.max = date(9999, 12, 31)
date.resolution = timedelta(days=1)
class tzinfo(object):
    """Abstract base class for time zone info classes.
    Subclasses must override the name(), utcoffset() and dst() methods.
    """
    __slots__ = ()
    def tzname(self, dt):
        "datetime -> string name of time zone."
        raise NotImplementedError("tzinfo subclass must override tzname()")
    def utcoffset(self, dt):
        "datetime -> minutes east of UTC (negative for west of UTC)"
        raise NotImplementedError("tzinfo subclass must override utcoffset()")
    def dst(self, dt):
        """datetime -> DST offset in minutes east of UTC.
        Return 0 if DST not in effect.  utcoffset() must include the DST
        offset.
        """
        raise NotImplementedError("tzinfo subclass must override dst()")
    def fromutc(self, dt):
        "datetime in UTC -> datetime in local time."
        if not isinstance(dt, datetime):
            raise TypeError("fromutc() requires a datetime argument")
        if dt.tzinfo is not self:
            raise ValueError("dt.tzinfo is not self")
        dtoff = dt.utcoffset()
        if dtoff is None:
            raise ValueError("fromutc() requires a non-None utcoffset() "
                             "result")
        # See the long comment block at the end of this file for an
        # explanation of this algorithm.
        dtdst = dt.dst()
        if dtdst is None:
            raise ValueError("fromutc() requires a non-None dst() result")
        # delta is the standard (non-DST) offset; apply it, then re-query
        # DST at the shifted local time.
        delta = dtoff - dtdst
        if delta:
            dt += delta
            dtdst = dt.dst()
            if dtdst is None:
                raise ValueError("fromutc(): dt.dst gave inconsistent "
                                 "results; cannot convert")
        if dtdst:
            return dt + dtdst
        else:
            return dt
    # Pickle support.
    def __reduce__(self):
        # Honor __getinitargs__/__getstate__ if the subclass provides them;
        # otherwise fall back to the instance __dict__ (or nothing).
        getinitargs = getattr(self, "__getinitargs__", None)
        if getinitargs:
            args = getinitargs()
        else:
            args = ()
        getstate = getattr(self, "__getstate__", None)
        if getstate:
            state = getstate()
        else:
            state = getattr(self, "__dict__", None) or None
        if state is None:
            return (self.__class__, args)
        else:
            return (self.__class__, args, state)
_tzinfo_class = tzinfo
class time(object):
"""Time with time zone.
Constructors:
__new__()
Operators:
__repr__, __str__
__cmp__, __hash__
Methods:
strftime()
isoformat()
utcoffset()
tzname()
dst()
Properties (readonly):
hour, minute, second, microsecond, tzinfo
"""
__slots__ = '_hour', '_minute', '_second', '_microsecond', '_tzinfo', '_hashcode'
    def __new__(cls, hour=0, minute=0, second=0, microsecond=0, tzinfo=None):
        """Constructor.
        Arguments:
        hour, minute (required)
        second, microsecond (default to zero)
        tzinfo (default to None)
        """
        # if isinstance(hour, bytes) and len(hour) == 6 and ord(hour[0]) < 24:
        #     # Pickle support
        #     self = object.__new__(cls)
        #     self.__setstate(hour, minute or None)
        #     self._hashcode = -1
        #     return self
        # Validate and coerce every field before building the instance.
        hour, minute, second, microsecond = _check_time_fields(
            hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # hash computed lazily by __hash__
        return self
# Read-only field accessors
    # Each accessor mirrors a slot set (and range-checked) in __new__.
    @property
    def hour(self):
        """hour (0-23)"""
        return self._hour
    @property
    def minute(self):
        """minute (0-59)"""
        return self._minute
    @property
    def second(self):
        """second (0-59)"""
        return self._second
    @property
    def microsecond(self):
        """microsecond (0-999999)"""
        return self._microsecond
    @property
    def tzinfo(self):
        """timezone info object"""
        return self._tzinfo
# Standard conversions, __hash__ (and helpers)
# Comparisons of time objects with other.
    # Rich comparisons delegate to _cmp().  Equality against a non-time
    # object answers False/True; ordering against one raises TypeError.
    def __eq__(self, other):
        if isinstance(other, time):
            return self._cmp(other) == 0
        else:
            return False
    def __ne__(self, other):
        if isinstance(other, time):
            return self._cmp(other) != 0
        else:
            return True
    def __le__(self, other):
        if isinstance(other, time):
            return self._cmp(other) <= 0
        else:
            _cmperror(self, other)
    def __lt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) < 0
        else:
            _cmperror(self, other)
    def __ge__(self, other):
        if isinstance(other, time):
            return self._cmp(other) >= 0
        else:
            _cmperror(self, other)
    def __gt__(self, other):
        if isinstance(other, time):
            return self._cmp(other) > 0
        else:
            _cmperror(self, other)
    def _cmp(self, other):
        # Three-way compare.  Times sharing the same tzinfo object (or
        # equal UTC offsets) compare by wall-clock fields; otherwise both
        # must be aware and are compared after subtracting their offsets.
        assert isinstance(other, time)
        mytz = self._tzinfo
        ottz = other._tzinfo
        myoff = otoff = None
        if mytz is ottz:
            base_compare = True
        else:
            myoff = self._utcoffset()
            otoff = other._utcoffset()
            base_compare = myoff == otoff
        if base_compare:
            return _cmp((self._hour, self._minute, self._second,
                         self._microsecond),
                        (other._hour, other._minute, other._second,
                         other._microsecond))
        if myoff is None or otoff is None:
            raise TypeError("can't compare offset-naive and offset-aware times")
        # Offsets are in minutes, so fold them into an hour*60+minute count.
        myhhmm = self._hour * 60 + self._minute - myoff
        othhmm = other._hour * 60 + other._minute - otoff
        return _cmp((myhhmm, self._second, self._microsecond),
                    (othhmm, other._second, other._microsecond))
    def __hash__(self):
        """Hash."""
        if self._hashcode == -1:
            tzoff = self._utcoffset()
            if not tzoff: # zero or None
                self._hashcode = hash(self._getstate()[0])
            else:
                # Normalize to UTC so equal aware times hash equally.
                h, m = divmod(self.hour * 60 + self.minute - tzoff, 60)
                if 0 <= h < 24:
                    self._hashcode = hash(time(h, m, self.second, self.microsecond))
                else:
                    # Offset pushes the hour out of 0..23; hash the raw tuple.
                    self._hashcode = hash((h, m, self.second, self.microsecond))
        return self._hashcode
# Conversion to string
    def _tzstr(self, sep=":"):
        """Return formatted timezone offset (+xx:xx) or None.

        sep separates hours from minutes in the rendered offset.
        """
        off = self._utcoffset()
        if off is not None:
            if off < 0:
                sign = "-"
                off = -off
            else:
                sign = "+"
            # off is a count of minutes east of UTC.
            hh, mm = divmod(off, 60)
            assert 0 <= hh < 24
            off = "%s%02d%s%02d" % (sign, hh, sep, mm)
        return off
    def __repr__(self):
        """Convert to formal string, for repr()."""
        # Trailing zero fields are omitted from the argument list.
        if self._microsecond != 0:
            s = ", %d, %d" % (self._second, self._microsecond)
        elif self._second != 0:
            s = ", %d" % self._second
        else:
            s = ""
        # Only qualify with "datetime." for the real class, not subclasses.
        module = "datetime." if self.__class__ is time else ""
        s= "%s(%d, %d%s)" % (module + self.__class__.__name__,
                             self._hour, self._minute, s)
        if self._tzinfo is not None:
            # Splice the tzinfo keyword inside the closing paren.
            assert s[-1:] == ")"
            s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
        return s
    def isoformat(self):
        """Return the time formatted according to ISO.
        This is 'HH:MM:SS.mmmmmm+zz:zz', or 'HH:MM:SS+zz:zz' if
        self.microsecond == 0.
        """
        s = _format_time(self._hour, self._minute, self._second,
                         self._microsecond)
        # Append the UTC offset only for aware times.
        tz = self._tzstr()
        if tz:
            s += tz
        return s
    __str__ = isoformat
# def strftime(self, format):
# """Format using strftime(). The date part of the timestamp passed
# to underlying strftime should not be used.
# """
# # The year must be >= _MINYEARFMT else Python's strftime implementation
# # can raise a bogus exception.
# timetuple = (1900, 1, 1,
# self._hour, self._minute, self._second,
# 0, 1, -1)
# return _wrap_strftime(self, format, timetuple)
def __format__(self, fmt):
    """Support format()/str.format(); an empty spec falls back to str(self)."""
    # Python 2: both byte and unicode strings are acceptable format specs.
    if not isinstance(fmt, (str, unicode)):
        raise ValueError("__format__ expects str or unicode, not %s" %
                         fmt.__class__.__name__)
    if len(fmt) != 0:
        return self.strftime(fmt)
    return str(self)
# Timezone functions

def utcoffset(self):
    """Return the timezone offset in minutes east of UTC (negative west of
    UTC), as a timedelta, or None for a naive time."""
    if self._tzinfo is None:
        return None
    # A time carries no date, so tzinfo methods are passed None (not self).
    offset = self._tzinfo.utcoffset(None)
    offset = _check_utc_offset("utcoffset", offset)
    if offset is not None:
        # Wrap the validated minutes in a pre-normalized timedelta.
        offset = timedelta._create(0, offset * 60, 0, True)
    return offset

# Return an integer (or None) instead of a timedelta (or None).
def _utcoffset(self):
    if self._tzinfo is None:
        return None
    offset = self._tzinfo.utcoffset(None)
    offset = _check_utc_offset("utcoffset", offset)
    return offset

def tzname(self):
    """Return the timezone name.

    Note that the name is 100% informational -- there's no requirement that
    it mean anything in particular. For example, "GMT", "UTC", "-500",
    "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
    """
    if self._tzinfo is None:
        return None
    name = self._tzinfo.tzname(None)
    _check_tzname(name)
    return name

def dst(self):
    """Return 0 if DST is not in effect, or the DST offset (in minutes
    eastward) if DST is in effect.

    This is purely informational; the DST offset has already been added to
    the UTC offset returned by utcoffset() if applicable, so there's no
    need to consult dst() unless you're interested in displaying the DST
    info.
    """
    if self._tzinfo is None:
        return None
    offset = self._tzinfo.dst(None)
    offset = _check_utc_offset("dst", offset)
    if offset is not None:
        offset = timedelta._create(0, offset * 60, 0, True)
    return offset

# Return an integer (or None) instead of a timedelta (or None).
def _dst(self):
    if self._tzinfo is None:
        return None
    offset = self._tzinfo.dst(None)
    offset = _check_utc_offset("dst", offset)
    return offset
def replace(self, hour=None, minute=None, second=None, microsecond=None,
            tzinfo=True):
    """Return a new time with new values for the specified fields.

    tzinfo uses True as its sentinel default so callers can still pass
    tzinfo=None to strip the timezone from the result.
    """
    hh = self.hour if hour is None else hour
    mm = self.minute if minute is None else minute
    ss = self.second if second is None else second
    us = self.microsecond if microsecond is None else microsecond
    tz = self.tzinfo if tzinfo is True else tzinfo
    return time.__new__(type(self), hh, mm, ss, us, tz)
def __nonzero__(self):
    """Python 2 truth value: False only for midnight's UTC equivalent.

    NOTE(review): Python 3 dropped this -- all time objects are true there.
    """
    if self.second or self.microsecond:
        return True
    offset = self._utcoffset() or 0  # treat naive times as offset 0
    return self.hour * 60 + self.minute != offset
# Pickle support.

def _getstate(self):
    """Return the pickle payload: packed field bytes (+ tzinfo when aware)."""
    us2, us3 = divmod(self._microsecond, 256)
    us1, us2 = divmod(us2, 256)
    basestate = _struct.pack('6B', self._hour, self._minute, self._second,
                             us1, us2, us3)
    if self._tzinfo is None:
        return (basestate,)
    else:
        return (basestate, self._tzinfo)

def __setstate(self, string, tzinfo):
    """Restore fields from the packed byte string produced by _getstate()."""
    if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
        raise TypeError("bad tzinfo state arg")
    self._hour, self._minute, self._second, us1, us2, us3 = (
        ord(string[0]), ord(string[1]), ord(string[2]),
        ord(string[3]), ord(string[4]), ord(string[5]))
    self._microsecond = (((us1 << 8) | us2) << 8) | us3
    self._tzinfo = tzinfo

def __reduce__(self):
    # Use self.__class__ rather than the hard-coded base class so that
    # subclass instances round-trip through pickle as their own type;
    # this also matches datetime.__reduce__ below.
    return (self.__class__, self._getstate())
_time_class = time  # so functions w/ args named "time" can get at the class

# Supported range and granularity, exposed as class attributes.
time.min = time(0, 0, 0)
time.max = time(23, 59, 59, 999999)
time.resolution = timedelta(microseconds=1)
class datetime(date):
    """datetime(year, month, day[, hour[, minute[, second[, microsecond[,tzinfo]]]]])

    The year, month and day arguments are required. tzinfo may be None, or an
    instance of a tzinfo subclass. The remaining arguments may be ints or longs.
    """
    # Combine the storage slots of both parent value types.
    __slots__ = date.__slots__ + time.__slots__

    def __new__(cls, year, month=None, day=None, hour=0, minute=0, second=0,
                microsecond=0, tzinfo=None):
        # if isinstance(year, bytes) and len(year) == 10 and \
        #    1 <= ord(year[2]) <= 12:
        #     # Pickle support
        #     self = object.__new__(cls)
        #     self.__setstate(year, month)
        #     self._hashcode = -1
        #     return self
        # Validate every field before constructing the instance; the
        # checkers raise on out-of-range values and normalize types.
        year, month, day = _check_date_fields(year, month, day)
        hour, minute, second, microsecond = _check_time_fields(
            hour, minute, second, microsecond)
        _check_tzinfo_arg(tzinfo)
        self = object.__new__(cls)
        self._year = year
        self._month = month
        self._day = day
        self._hour = hour
        self._minute = minute
        self._second = second
        self._microsecond = microsecond
        self._tzinfo = tzinfo
        self._hashcode = -1  # hash cache; -1 means "not computed yet"
        return self
# Read-only field accessors; instances are immutable, so the private
# slots are only ever exposed through these properties.

@property
def hour(self):
    """hour (0-23)"""
    return self._hour

@property
def minute(self):
    """minute (0-59)"""
    return self._minute

@property
def second(self):
    """second (0-59)"""
    return self._second

@property
def microsecond(self):
    """microsecond (0-999999)"""
    return self._microsecond

@property
def tzinfo(self):
    """timezone info object"""
    return self._tzinfo
@classmethod
def fromtimestamp(cls, timestamp, tz=None):
    """Construct a datetime from a POSIX timestamp (like time.time()).

    A timezone info object may be passed in as well.
    """
    _check_tzinfo_arg(tz)
    # Naive result: interpret in local time.  Aware result: build in UTC,
    # then let tz.fromutc() convert to the target zone below.
    converter = _time.localtime if tz is None else _time.gmtime
    self = cls._from_timestamp(converter, timestamp, tz)
    if tz is not None:
        self = tz.fromutc(self)
    return self

@classmethod
def utcfromtimestamp(cls, t):
    "Construct a UTC datetime from a POSIX timestamp (like time.time())."
    return cls._from_timestamp(_time.gmtime, t, None)

@classmethod
def _from_timestamp(cls, converter, timestamp, tzinfo):
    """Shared helper: split a float timestamp and build the instance."""
    t_full = timestamp
    timestamp = int(_math.floor(timestamp))
    frac = t_full - timestamp
    us = _round(frac * 1e6)
    # If timestamp is less than one microsecond smaller than a
    # full second, us can be rounded up to 1000000. In this case,
    # roll over to seconds, otherwise, ValueError is raised
    # by the constructor.
    if us == 1000000:
        timestamp += 1
        us = 0
    y, m, d, hh, mm, ss, weekday, jday, dst = converter(timestamp)
    ss = min(ss, 59)  # clamp out leap seconds if the platform has them
    return cls(y, m, d, hh, mm, ss, us, tzinfo)
@classmethod
def now(cls, tz=None):
    "Construct a datetime from time.time() and optional time zone info."
    return cls.fromtimestamp(_time.time(), tz)

@classmethod
def utcnow(cls):
    "Construct a UTC datetime from time.time()."
    return cls.utcfromtimestamp(_time.time())
@classmethod
def combine(cls, date, time):
    "Construct a datetime from a given date and a given time."
    # The parameters intentionally shadow the date/time classes, hence
    # the _date_class/_time_class module aliases for the isinstance checks.
    if not isinstance(date, _date_class):
        raise TypeError("date argument must be a date instance")
    if not isinstance(time, _time_class):
        raise TypeError("time argument must be a time instance")
    # The time's tzinfo (if any) is carried over to the result.
    return cls(date.year, date.month, date.day,
               time.hour, time.minute, time.second, time.microsecond,
               time.tzinfo)
def timetuple(self):
    "Return local time tuple compatible with time.localtime()."
    # struct_time's tm_isdst flag: -1 unknown, 0 off, 1 on.
    dst = self._dst()
    if dst is None:
        dst = -1
    elif dst:
        dst = 1
    return _build_struct_time(self.year, self.month, self.day,
                              self.hour, self.minute, self.second,
                              dst)

def utctimetuple(self):
    "Return UTC time tuple compatible with time.gmtime()."
    y, m, d = self.year, self.month, self.day
    hh, mm, ss = self.hour, self.minute, self.second
    offset = self._utcoffset()
    if offset:  # neither None nor 0
        # Subtract the offset (in minutes) and let the normalizer carry
        # any overflow/underflow through the date fields.
        mm -= offset
        y, m, d, hh, mm, ss, _ = _normalize_datetime(
            y, m, d, hh, mm, ss, 0, ignore_overflow=True)
    # tm_isdst is always 0 for a UTC tuple.
    return _build_struct_time(y, m, d, hh, mm, ss, 0)

def date(self):
    "Return the date part."
    return date(self._year, self._month, self._day)

def time(self):
    "Return the time part, with tzinfo None."
    return time(self.hour, self.minute, self.second, self.microsecond)

def timetz(self):
    "Return the time part, with same tzinfo."
    return time(self.hour, self.minute, self.second, self.microsecond,
                self._tzinfo)
def replace(self, year=None, month=None, day=None, hour=None,
            minute=None, second=None, microsecond=None, tzinfo=True):
    """Return a new datetime with new values for the specified fields.

    tzinfo defaults to the sentinel True so that tzinfo=None can still
    be passed to make the result naive.
    """
    values = [
        self.year if year is None else year,
        self.month if month is None else month,
        self.day if day is None else day,
        self.hour if hour is None else hour,
        self.minute if minute is None else minute,
        self.second if second is None else second,
        self.microsecond if microsecond is None else microsecond,
        self.tzinfo if tzinfo is True else tzinfo,
    ]
    return datetime.__new__(type(self), *values)
def astimezone(self, tz):
    """Convert an aware datetime to timezone tz (same UTC instant)."""
    if not isinstance(tz, tzinfo):
        raise TypeError("tz argument must be an instance of tzinfo")
    mytz = self.tzinfo
    if mytz is None:
        raise ValueError("astimezone() requires an aware datetime")
    if tz is mytz:
        return self
    # Convert self to UTC, and attach the new time zone object.
    myoffset = self.utcoffset()
    if myoffset is None:
        raise ValueError("astimezone() requires an aware datetime")
    utc = (self - myoffset).replace(tzinfo=tz)
    # Convert from UTC to tz's local time.
    return tz.fromutc(utc)

# Ways to produce a string.

def ctime(self):
    "Return ctime() style string."
    # toordinal() % 7 maps Monday=1..Sunday=0; 'or 7' folds 0 to Sunday's
    # slot in the 1-based _DAYNAMES table.
    weekday = self.toordinal() % 7 or 7
    return "%s %s %2d %02d:%02d:%02d %04d" % (
        _DAYNAMES[weekday],
        _MONTHNAMES[self._month],
        self._day,
        self._hour, self._minute, self._second,
        self._year)

def isoformat(self, sep='T'):
    """Return the time formatted according to ISO.

    This is 'YYYY-MM-DD HH:MM:SS.mmmmmm', or 'YYYY-MM-DD HH:MM:SS' if
    self.microsecond == 0.

    If self.tzinfo is not None, the UTC offset is also attached, giving
    'YYYY-MM-DD HH:MM:SS.mmmmmm+HH:MM' or 'YYYY-MM-DD HH:MM:SS+HH:MM'.

    Optional argument sep specifies the separator between date and
    time, default 'T'.
    """
    s = ("%04d-%02d-%02d%c" % (self._year, self._month, self._day, sep) +
         _format_time(self._hour, self._minute, self._second,
                      self._microsecond))
    off = self._utcoffset()
    if off is not None:
        # Append the signed +HH:MM / -HH:MM offset suffix.
        if off < 0:
            sign = "-"
            off = -off
        else:
            sign = "+"
        hh, mm = divmod(off, 60)
        s += "%s%02d:%02d" % (sign, hh, mm)
    return s
def __repr__(self):
    """Convert to formal string, for repr()."""
    fields = [self._year, self._month, self._day,  # These are never zero
              self._hour, self._minute, self._second, self._microsecond]
    # Drop a trailing zero microsecond, then a trailing zero second.
    for _ in range(2):
        if fields[-1] == 0:
            del fields[-1]
    prefix = "datetime." if self.__class__ is datetime else ""
    body = ", ".join(map(str, fields))
    s = "%s(%s)" % (prefix + self.__class__.__name__, body)
    if self._tzinfo is not None:
        assert s[-1:] == ")"
        s = s[:-1] + ", tzinfo=%r" % self._tzinfo + ")"
    return s

def __str__(self):
    "Convert to string, for str()."
    return self.isoformat(sep=' ')
# @classmethod
# def strptime(cls, date_string, format):
# 'string, format -> new datetime parsed from a string (like time.strptime()).'
# from _strptime import _strptime
# # _strptime._strptime returns a two-element tuple. The first
# # element is a time.struct_time object. The second is the
# # microseconds (which are not defined for time.struct_time).
# struct, micros = _strptime(date_string, format)
# return cls(*(struct[0:6] + (micros,)))
def utcoffset(self):
    """Return the timezone offset in minutes east of UTC (negative west of
    UTC), as a timedelta, or None for a naive datetime."""
    if self._tzinfo is None:
        return None
    # Unlike time, a datetime passes itself to the tzinfo methods so the
    # zone can consult the date (e.g. for DST transitions).
    offset = self._tzinfo.utcoffset(self)
    offset = _check_utc_offset("utcoffset", offset)
    if offset is not None:
        # Wrap the validated minutes in a pre-normalized timedelta.
        offset = timedelta._create(0, offset * 60, 0, True)
    return offset

# Return an integer (or None) instead of a timedelta (or None).
def _utcoffset(self):
    if self._tzinfo is None:
        return None
    offset = self._tzinfo.utcoffset(self)
    offset = _check_utc_offset("utcoffset", offset)
    return offset

def tzname(self):
    """Return the timezone name.

    Note that the name is 100% informational -- there's no requirement that
    it mean anything in particular. For example, "GMT", "UTC", "-500",
    "-5:00", "EDT", "US/Eastern", "America/New York" are all valid replies.
    """
    if self._tzinfo is None:
        return None
    name = self._tzinfo.tzname(self)
    _check_tzname(name)
    return name

def dst(self):
    """Return 0 if DST is not in effect, or the DST offset (in minutes
    eastward) if DST is in effect.

    This is purely informational; the DST offset has already been added to
    the UTC offset returned by utcoffset() if applicable, so there's no
    need to consult dst() unless you're interested in displaying the DST
    info.
    """
    if self._tzinfo is None:
        return None
    offset = self._tzinfo.dst(self)
    offset = _check_utc_offset("dst", offset)
    if offset is not None:
        offset = timedelta._create(0, offset * 60, 0, True)
    return offset

# Return an integer (or None) instead of a timedelta (or None).
def _dst(self):
    if self._tzinfo is None:
        return None
    offset = self._tzinfo.dst(self)
    offset = _check_utc_offset("dst", offset)
    return offset
# Comparisons of datetime objects with other.
# The hasattr(other, "timetuple") test lets objects that claim to be
# date-like (but aren't date instances) reflect the comparison back to
# the other operand via NotImplemented.

def __eq__(self, other):
    if isinstance(other, datetime):
        return self._cmp(other) == 0
    elif hasattr(other, "timetuple") and not isinstance(other, date):
        return NotImplemented
    else:
        return False

def __ne__(self, other):
    if isinstance(other, datetime):
        return self._cmp(other) != 0
    elif hasattr(other, "timetuple") and not isinstance(other, date):
        return NotImplemented
    else:
        return True

def __le__(self, other):
    if isinstance(other, datetime):
        return self._cmp(other) <= 0
    elif hasattr(other, "timetuple") and not isinstance(other, date):
        return NotImplemented
    else:
        _cmperror(self, other)

def __lt__(self, other):
    if isinstance(other, datetime):
        return self._cmp(other) < 0
    elif hasattr(other, "timetuple") and not isinstance(other, date):
        return NotImplemented
    else:
        _cmperror(self, other)

def __ge__(self, other):
    if isinstance(other, datetime):
        return self._cmp(other) >= 0
    elif hasattr(other, "timetuple") and not isinstance(other, date):
        return NotImplemented
    else:
        _cmperror(self, other)

def __gt__(self, other):
    if isinstance(other, datetime):
        return self._cmp(other) > 0
    elif hasattr(other, "timetuple") and not isinstance(other, date):
        return NotImplemented
    else:
        _cmperror(self, other)

def _cmp(self, other):
    """Three-way compare with another datetime; returns -1, 0 or 1."""
    assert isinstance(other, datetime)
    mytz = self._tzinfo
    ottz = other._tzinfo
    myoff = otoff = None
    # Field-by-field comparison is valid when both share the same tzinfo
    # object, or when their UTC offsets happen to be equal.
    if mytz is ottz:
        base_compare = True
    else:
        if mytz is not None:
            myoff = self._utcoffset()
        if ottz is not None:
            otoff = other._utcoffset()
        base_compare = myoff == otoff
    if base_compare:
        return _cmp((self._year, self._month, self._day,
                     self._hour, self._minute, self._second,
                     self._microsecond),
                    (other._year, other._month, other._day,
                     other._hour, other._minute, other._second,
                     other._microsecond))
    if myoff is None or otoff is None:
        raise TypeError("can't compare offset-naive and offset-aware datetimes")
    # XXX What follows could be done more efficiently...
    diff = self - other     # this will take offsets into account
    if diff.days < 0:
        return -1
    return diff and 1 or 0
def _add_timedelta(self, other, factor):
    """Return self + factor*other (factor is +1 or -1) as a new datetime."""
    # Push the raw per-field sums through the normalizer, which carries
    # overflow between microseconds/seconds/days and validates the range.
    y, m, d, hh, mm, ss, us = _normalize_datetime(
        self._year,
        self._month,
        self._day + other.days * factor,
        self._hour,
        self._minute,
        self._second + other.seconds * factor,
        self._microsecond + other.microseconds * factor)
    return datetime(y, m, d, hh, mm, ss, us, tzinfo=self._tzinfo)

def __add__(self, other):
    "Add a datetime and a timedelta."
    if not isinstance(other, timedelta):
        return NotImplemented
    return self._add_timedelta(other, 1)

__radd__ = __add__

def __sub__(self, other):
    "Subtract two datetimes, or a datetime and a timedelta."
    if not isinstance(other, datetime):
        if isinstance(other, timedelta):
            return self._add_timedelta(other, -1)
        return NotImplemented
    # Naive field difference first ...
    delta_d = self.toordinal() - other.toordinal()
    delta_s = (self._hour - other._hour) * 3600 + \
              (self._minute - other._minute) * 60 + \
              (self._second - other._second)
    delta_us = self._microsecond - other._microsecond
    base = timedelta._create(delta_d, delta_s, delta_us, True)
    if self._tzinfo is other._tzinfo:
        return base
    # ... then correct for differing UTC offsets, if both are aware.
    myoff = self._utcoffset()
    otoff = other._utcoffset()
    if myoff == otoff:
        return base
    if myoff is None or otoff is None:
        raise TypeError("can't subtract offset-naive and offset-aware datetimes")
    return base + timedelta(minutes=otoff - myoff)

def __hash__(self):
    """Hash; aware datetimes hash as their UTC equivalent (cached)."""
    if self._hashcode == -1:
        tzoff = self._utcoffset()
        if tzoff is None:
            # Naive: hash the packed byte state directly.
            self._hashcode = hash(self._getstate()[0])
        else:
            # Aware: fold the offset into the minutes so equal instants
            # in different zones hash equal.
            days = _ymd2ord(self.year, self.month, self.day)
            seconds = self.hour * 3600 + (self.minute - tzoff) * 60 + self.second
            self._hashcode = hash(timedelta(days, seconds, self.microsecond))
    return self._hashcode
# Pickle support.

def _getstate(self):
    """Return the pickle payload: 10 packed field bytes (+ tzinfo when aware)."""
    yhi, ylo = divmod(self._year, 256)
    us2, us3 = divmod(self._microsecond, 256)
    us1, us2 = divmod(us2, 256)
    basestate = _struct.pack('10B', yhi, ylo, self._month, self._day,
                             self._hour, self._minute, self._second,
                             us1, us2, us3)
    if self._tzinfo is None:
        return (basestate,)
    else:
        return (basestate, self._tzinfo)

def __setstate(self, string, tzinfo):
    """Restore fields from the packed byte string produced by _getstate()."""
    if tzinfo is not None and not isinstance(tzinfo, _tzinfo_class):
        raise TypeError("bad tzinfo state arg")
    (yhi, ylo, self._month, self._day, self._hour,
     self._minute, self._second, us1, us2, us3) = (ord(string[0]),
        ord(string[1]), ord(string[2]), ord(string[3]),
        ord(string[4]), ord(string[5]), ord(string[6]),
        ord(string[7]), ord(string[8]), ord(string[9]))
    self._year = yhi * 256 + ylo
    self._microsecond = (((us1 << 8) | us2) << 8) | us3
    self._tzinfo = tzinfo

def __reduce__(self):
    # self.__class__ (not a hard-coded class) so subclasses pickle correctly.
    return (self.__class__, self._getstate())

# Supported range and granularity, exposed as class attributes.
datetime.min = datetime(1, 1, 1)
datetime.max = datetime(9999, 12, 31, 23, 59, 59, 999999)
datetime.resolution = timedelta(microseconds=1)
def _isoweek1monday(year):
    # Helper to calculate the day number of the Monday starting week 1
    # of the ISO calendar: the week containing the year's first Thursday.
    # XXX This could be done more efficiently
    THURSDAY = 3
    jan1 = _ymd2ord(year, 1, 1)
    jan1_weekday = (jan1 + 6) % 7  # See weekday() above
    week1monday = jan1 - jan1_weekday
    if jan1_weekday > THURSDAY:
        week1monday += 7
    return week1monday
"""
Some time zone algebra. For a datetime x, let
x.n = x stripped of its timezone -- its naive time.
x.o = x.utcoffset(), and assuming that doesn't raise an exception or
return None
x.d = x.dst(), and assuming that doesn't raise an exception or
return None
x.s = x's standard offset, x.o - x.d
Now some derived rules, where k is a duration (timedelta).
1. x.o = x.s + x.d
This follows from the definition of x.s.
2. If x and y have the same tzinfo member, x.s = y.s.
This is actually a requirement, an assumption we need to make about
sane tzinfo classes.
3. The naive UTC time corresponding to x is x.n - x.o.
This is again a requirement for a sane tzinfo class.
4. (x+k).s = x.s
This follows from #2, and the fact that datetime+timedelta preserves tzinfo.
5. (x+k).n = x.n + k
Again follows from how arithmetic is defined.
Now we can explain tz.fromutc(x). Let's assume it's an interesting case
(meaning that the various tzinfo methods exist, and don't blow up or return
None when called).
The function wants to return a datetime y with timezone tz, equivalent to x.
x is already in UTC.
By #3, we want
y.n - y.o = x.n [1]
The algorithm starts by attaching tz to x.n, and calling that y. So
x.n = y.n at the start. Then it wants to add a duration k to y, so that [1]
becomes true; in effect, we want to solve [2] for k:
(y+k).n - (y+k).o = x.n [2]
By #1, this is the same as
(y+k).n - ((y+k).s + (y+k).d) = x.n [3]
By #5, (y+k).n = y.n + k, which equals x.n + k because x.n=y.n at the start.
Substituting that into [3],
x.n + k - (y+k).s - (y+k).d = x.n; the x.n terms cancel, leaving
k - (y+k).s - (y+k).d = 0; rearranging,
k = (y+k).s - (y+k).d; by #4, (y+k).s == y.s, so
k = y.s - (y+k).d
On the RHS, (y+k).d can't be computed directly, but y.s can be, and we
approximate k by ignoring the (y+k).d term at first. Note that k can't be
very large, since all offset-returning methods return a duration of magnitude
less than 24 hours. For that reason, if y is firmly in std time, (y+k).d must
be 0, so ignoring it has no consequence then.
In any case, the new value is
z = y + y.s [4]
It's helpful to step back and look at [4] from a higher level: it's simply
mapping from UTC to tz's standard time.
At this point, if
z.n - z.o = x.n [5]
we have an equivalent time, and are almost done. The insecurity here is
at the start of daylight time. Picture US Eastern for concreteness. The wall
time jumps from 1:59 to 3:00, and wall hours of the form 2:MM don't make good
sense then. The docs ask that an Eastern tzinfo class consider such a time to
be EDT (because it's "after 2"), which is a redundant spelling of 1:MM EST
on the day DST starts. We want to return the 1:MM EST spelling because that's
the only spelling that makes sense on the local wall clock.
In fact, if [5] holds at this point, we do have the standard-time spelling,
but that takes a bit of proof. We first prove a stronger result. What's the
difference between the LHS and RHS of [5]? Let
diff = x.n - (z.n - z.o) [6]
Now
z.n = by [4]
(y + y.s).n = by #5
y.n + y.s = since y.n = x.n
x.n + y.s = since z and y have the same tzinfo member,
y.s = z.s by #2
x.n + z.s
Plugging that back into [6] gives
diff =
x.n - ((x.n + z.s) - z.o) = expanding
x.n - x.n - z.s + z.o = cancelling
- z.s + z.o = by #2
z.d
So diff = z.d.
If [5] is true now, diff = 0, so z.d = 0 too, and we have the standard-time
spelling we wanted in the endcase described above. We're done. Contrarily,
if z.d = 0, then we have a UTC equivalent, and are also done.
If [5] is not true now, diff = z.d != 0, and z.d is the offset we need to
add to z (in effect, z is in tz's standard time, and we need to shift the
local clock into tz's daylight time).
Let
z' = z + z.d = z + diff [7]
and we can again ask whether
z'.n - z'.o = x.n [8]
If so, we're done. If not, the tzinfo class is insane, according to the
assumptions we've made. This also requires a bit of proof. As before, let's
compute the difference between the LHS and RHS of [8] (and skipping some of
the justifications for the kinds of substitutions we've done several times
already):
diff' = x.n - (z'.n - z'.o) = replacing z'.n via [7]
x.n - (z.n + diff - z'.o) = replacing diff via [6]
x.n - (z.n + x.n - (z.n - z.o) - z'.o) =
x.n - z.n - x.n + z.n - z.o + z'.o = cancel x.n
- z.n + z.n - z.o + z'.o = cancel z.n
- z.o + z'.o = #1 twice
-z.s - z.d + z'.s + z'.d = z and z' have same tzinfo
z'.d - z.d
So z' is UTC-equivalent to x iff z'.d = z.d at this point. If they are equal,
we've found the UTC-equivalent so are done. In fact, we stop with [7] and
return z', not bothering to compute z'.d.
How could z.d and z'.d differ? z' = z + z.d [7], so merely moving z' by
a dst() offset, and starting *from* a time already in DST (we know z.d != 0),
would have to change the result dst() returns: we start in DST, and moving
a little further into it takes us out of DST.
There isn't a sane case where this can happen. The closest it gets is at
the end of DST, where there's an hour in UTC with no spelling in a hybrid
tzinfo class. In US Eastern, that's 5:MM UTC = 0:MM EST = 1:MM EDT. During
that hour, on an Eastern clock 1:MM is taken as being in standard time (6:MM
UTC) because the docs insist on that, but 0:MM is taken as being in daylight
time (4:MM UTC). There is no local time mapping to 5:MM UTC. The local
clock jumps from 1:59 back to 1:00 again, and repeats the 1:MM hour in
standard time. Since that's what the local clock *does*, we want to map both
UTC hours 5:MM and 6:MM to 1:MM Eastern. The result is ambiguous
in local time, but so it goes -- it's the way the local clock works.
When x = 5:MM UTC is the input to this algorithm, x.o=0, y.o=-5 and y.d=0,
so z=0:MM. z.d=60 (minutes) then, so [5] doesn't hold and we keep going.
z' = z + z.d = 1:MM then, and z'.d=0, and z'.d - z.d = -60 != 0 so [8]
(correctly) concludes that z' is not UTC-equivalent to x.
Because we know z.d said z was in daylight time (else [5] would have held and
we would have stopped then), and we know z.d != z'.d (else [8] would have held
and we have stopped then), and there are only 2 possible values dst() can
return in Eastern, it follows that z'.d must be 0 (which it is in the example,
but the reasoning doesn't depend on the example -- it depends on there being
two possible dst() outcomes, one zero and the other non-zero). Therefore
z' must be in standard time, and is the spelling we want in this case.
Note again that z' is not UTC-equivalent as far as the hybrid tzinfo class is
concerned (because it takes z' as being in standard time rather than the
daylight time we intend here), but returning it gives the real-life "local
clock repeats an hour" behavior when mapping the "unspellable" UTC hour into
tz.
When the input is 6:MM, z=1:MM and z.d=0, and we stop at once, again with
the 1:MM standard time spelling we want.
So how can this break? One of the assumptions must be violated. Two
possibilities:
1) [2] effectively says that y.s is invariant across all y belong to a given
time zone. This isn't true if, for political reasons or continental drift,
a region decides to change its base offset from UTC.
2) There may be versions of "double daylight" time where the tail end of
the analysis gives up a step too early. I haven't thought about that
enough to say.
In any case, it's clear that the default fromutc() is strong enough to handle
"almost all" time zones: so long as the standard offset is invariant, it
doesn't matter if daylight time transition points change from year to year, or
if daylight time is skipped in some years; it doesn't matter how large or
small dst() may get within its bounds; and it doesn't even matter if some
perverse time zone returns a negative dst()). So a breaking case must be
pretty bizarre, and a tzinfo subclass can override fromutc() if it is.
"""
| apache-2.0 |
bkirui/odoo | addons/sale_stock/report/sale_report.py | 231 | 2100 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp import tools
class sale_report(osv.osv):
    """Extend the sale analysis report with warehouse and shipping data."""
    _inherit = "sale.report"
    _columns = {
        'shipped': fields.boolean('Shipped', readonly=True),
        # The shipped flag cast to integer so it can be summed in group-bys.
        'shipped_qty_1': fields.integer('Shipped', readonly=True),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', readonly=True),
        'state': fields.selection([
            ('draft', 'Quotation'),
            ('sent', 'Quotation Sent'),
            ('waiting_date', 'Waiting Schedule'),
            ('manual', 'Manual In Progress'),
            ('progress', 'In Progress'),
            ('shipping_except', 'Shipping Exception'),
            ('invoice_except', 'Invoice Exception'),
            ('done', 'Done'),
            ('cancel', 'Cancelled')
        ], 'Order Status', readonly=True),
    }

    def _select(self):
        # Append the extra columns to the parent report's SELECT clause.
        return super(sale_report, self)._select() + ", s.warehouse_id as warehouse_id, s.shipped, s.shipped::integer as shipped_qty_1"

    def _group_by(self):
        # Every non-aggregated extra column must also appear in GROUP BY.
        return super(sale_report, self)._group_by() + ", s.warehouse_id, s.shipped"
| agpl-3.0 |
Tchanders/socorro | socorro/unittest/testlib/libTestconfig.py | 11 | 2780 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import socorro.lib.ConfigurationManager as cm
import datetime
from socorro.unittest.config.commonconfig \
import databaseHost as database_hostname
try:
from socorro.unittest.config.commonconfig \
import databasePort as database_port
except:
database_port = 5432
from socorro.unittest.config.commonconfig \
import oldDatabaseName as database_name
from socorro.unittest.config.commonconfig \
import databaseUserName as database_username
from socorro.unittest.config.commonconfig \
import databasePassword as database_password
# --- File logging options ------------------------------------------------
logFilePathname = cm.Option()
logFilePathname.doc = 'full pathname for the log file'
logFilePathname.default = '%(testDir)s/logs/lib_test.log'

logFileMaximumSize = cm.Option()
logFileMaximumSize.doc = 'maximum size in bytes of the log file'
logFileMaximumSize.default = 1000000

logFileMaximumBackupHistory = cm.Option()
logFileMaximumBackupHistory.doc = 'maximum number of log files to keep'
logFileMaximumBackupHistory.default = 50

logFileLineFormatString = cm.Option()
logFileLineFormatString.doc = 'python logging system format for log file entries'
logFileLineFormatString.default = '%(asctime)s %(levelname)s - %(message)s'

logFileErrorLoggingLevel = cm.Option()
logFileErrorLoggingLevel.doc = 'logging level for the log file (10 - DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL)'
logFileErrorLoggingLevel.default = 10

# --- Syslog options (currently disabled) ---------------------------------
#syslogHost = cm.Option()
#syslogHost.doc = 'syslog hostname'
#syslogHost.default = 'localhost'
#syslogPort = cm.Option()
#syslogPort.doc = 'syslog port'
#syslogPort.default = 514
#syslogFacilityString = cm.Option()
#syslogFacilityString.doc = 'syslog facility string ("user", "local0", etc)'
#syslogFacilityString.default = 'user'
#syslogLineFormatString = cm.Option()
#syslogLineFormatString.doc = 'python logging system format for syslog entries'
#syslogLineFormatString.default = 'Socorro (pid %(process)d): %(asctime)s %(levelname)s - %(threadName)s - %(message)s'
#syslogErrorLoggingLevel = cm.Option()
#syslogErrorLoggingLevel.doc = 'logging level for the log file (10 - DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL)'
#syslogErrorLoggingLevel.default = 10

# --- stderr logging options ----------------------------------------------
stderrLineFormatString = cm.Option()
stderrLineFormatString.doc = 'python logging system format for logging to stderr'
stderrLineFormatString.default = '%(asctime)s %(levelname)s - %(message)s'

stderrErrorLoggingLevel = cm.Option()
stderrErrorLoggingLevel.doc = 'logging level for the logging to stderr (10 - DEBUG, 20 - INFO, 30 - WARNING, 40 - ERROR, 50 - CRITICAL)'
stderrErrorLoggingLevel.default = 40
| mpl-2.0 |
pamfilos/invenio | modules/webjournal/lib/elements/bfe_webjournal_sub_navigation.py | 25 | 6486 | # -*- coding: utf-8 -*-
## $Id: bfe_webjournal_CERNBulletinSubNavigation.py,v 1.13 2009/02/12 10:00:57 jerome Exp $
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
WebJournal element - Displays vertical subnavigation menu in detailed
article pages.
"""
from invenio.bibformat_engine import BibFormatObject
from invenio.messages import gettext_set_language
from invenio.webjournal_utils import \
parse_url_string, \
make_journal_url, \
get_journal_articles,\
get_journal_categories
def format_element(bfo, new_articles_first='yes',
                   subject_to_css_class_kb="WebJournalSubject2CSSClass",
                   display_all_category_articles='no'):
    """
    Creates a navigation for articles in the same issue and category.

    Builds an HTML vertical menu listing the articles of the current
    issue/category; the currently displayed article is rendered as
    "active" instead of being linked.

    @param new_articles_first: if 'yes', display new articles before other articles
    @param subject_to_css_class_kb: knowledge base that maps 595__a to a CSS class
    @param display_all_category_articles: if yes, display all articles, whatever category is selected
    """
    # get variables
    args = parse_url_string(bfo.user_info['uri'])
    this_recid = bfo.control_field('001')
    this_issue_number = args["issue"]
    category_name = args["category"]
    journal_name = args["journal_name"]
    ln = bfo.lang
    _ = gettext_set_language(ln)
    # Title of the current record: French UI prefers the translated title
    # (246_1a) with 245__a as fallback; other languages do the opposite.
    this_title = ""
    if ln == "fr":
        if bfo.fields('246_1a'):
            this_title = bfo.fields('246_1a')[0]
        elif bfo.fields('245__a'):
            this_title = bfo.fields('245__a')[0]
    else:
        if bfo.fields('245__a'):
            this_title = bfo.fields('245__a')[0]
        elif bfo.fields('246_1a'):
            this_title = bfo.fields('246_1a')[0]
    journal_categories = [category_name]
    if display_all_category_articles.lower() == 'yes':
        # Let's retrieve all categories. Ok, we are not supposed to do
        # that with that element, but if journal editor wants...
        journal_categories = get_journal_categories(journal_name,
                                                    this_issue_number)
    menu_out = ''
    for category in journal_categories:
        # ordered_articles maps an ordering number -> list of record ids;
        # negative ordering numbers denote "new" articles.
        ordered_articles = get_journal_articles(journal_name,
                                                this_issue_number,
                                                category,
                                                newest_first=new_articles_first.lower() == 'yes')
        new_articles_only = False
        if ordered_articles.keys() and max(ordered_articles.keys()) < 0:
            # If there are only new articles, don't bother marking them as
            # new
            new_articles_only = True
        menu_out += '<div class="subNavigationMenu">'
        order_numbers = ordered_articles.keys()
        order_numbers.sort()
        for order_number in order_numbers:
            for article_id in ordered_articles[order_number]:
                # A record is considered as new if its position is
                # negative and there are some non-new articles
                article_is_new = (order_number < 0 and not new_articles_only)
                if str(article_id) == this_recid:
                    # Mark as active
                    # Get CSS class (if relevant)
                    notes = bfo.fields('595__a')
                    css_classes = [bfo.kb(subject_to_css_class_kb, note, None) \
                                   for note in notes]
                    css_classes = [css_class for css_class in css_classes \
                                   if css_class is not None]
                    if article_is_new:
                        css_classes.append('new')
                    # 594__a == "YES" requests a horizontal rule before the item.
                    separator = bfo.field('594__a')
                    if separator == "YES":
                        menu_out += '''<hr/>'''
                    menu_out += '''<div class="active">
                    <div class="subNavigationMenuItem %s">%s</div></div>''' % \
                    (' '.join(css_classes),
                     this_title)
                else:
                    # Other articles of the issue: load their record to get
                    # title, CSS classes and separator flag, and link them.
                    temp_rec = BibFormatObject(article_id)
                    title = ''
                    if ln == "fr":
                        title = temp_rec.field('246_1a')
                        if title == '':
                            title = temp_rec.field('245__a')
                    else:
                        title = temp_rec.field('245__a')
                        if title == '':
                            title = temp_rec.field('246_1a')
                    # Get CSS class (if relevant)
                    notes = temp_rec.fields('595__a')
                    css_classes = [temp_rec.kb(subject_to_css_class_kb, note, None) \
                                   for note in notes]
                    css_classes = [css_class for css_class in css_classes \
                                   if css_class is not None]
                    if article_is_new:
                        css_classes.append('new')
                    separator = temp_rec.field('594__a')
                    if separator == "YES":
                        menu_out += '''<hr/>'''
                    menu_out += '''<div class="subNavigationMenuItem %s">
                    <a href="%s">%s</a></div>
                    ''' % (' '.join(css_classes),
                           make_journal_url(bfo.user_info['uri'],
                                            {'recid': article_id,
                                             'ln': bfo.lang,
                                             'category': category}),
                           title)
        menu_out += '</div>'
    return menu_out
def escape_values(bfo):
    """
    Called by BibFormat in order to check if output of this element
    should be escaped.
    """
    # This element produces raw HTML markup, so it must never be escaped.
    return 0
| gpl-2.0 |
mblondel/scikit-learn | doc/tutorial/text_analytics/skeletons/exercise_02_sentiment.py | 256 | 2406 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# NOTE(review): this file is an exercise skeleton.  The TASK sections below
# must be completed (in particular, defining 'y_predicted') before the
# script will actually run to completion.
if __name__ == "__main__":
    # NOTE: we put the following in a 'if __name__ == "__main__"' protected
    # block to be able to use a multi-core grid search that also works under
    # Windows, see: http://docs.python.org/library/multiprocessing.html#windows
    # The multiprocessing module is used as the backend of joblib.Parallel
    # that is used when n_jobs != 1 in GridSearchCV
    # the training data folder must be passed as first argument
    movie_reviews_data_folder = sys.argv[1]
    dataset = load_files(movie_reviews_data_folder, shuffle=False)
    print("n_samples: %d" % len(dataset.data))
    # split the dataset in training and test set:
    docs_train, docs_test, y_train, y_test = train_test_split(
        dataset.data, dataset.target, test_size=0.25, random_state=None)
    # TASK: Build a vectorizer / classifier pipeline that filters out tokens
    # that are too rare or too frequent
    # TASK: Build a grid search to find out whether unigrams or bigrams are
    # more useful.
    # Fit the pipeline on the training set using grid search for the parameters
    # TASK: print the cross-validated scores for the each parameters set
    # explored by the grid search
    # TASK: Predict the outcome on the testing set and store it in a variable
    # named y_predicted
    # Print the classification report
    print(metrics.classification_report(y_test, y_predicted,
                                        target_names=dataset.target_names))
    # Print and plot the confusion matrix
    cm = metrics.confusion_matrix(y_test, y_predicted)
    print(cm)
    # import matplotlib.pyplot as plt
    # plt.matshow(cm)
    # plt.show()
| bsd-3-clause |
ric03uec/boto | boto/sdb/db/test_db.py | 33 | 5431 | import logging
import time
from datetime import datetime
from boto.sdb.db.model import Model
from boto.sdb.db.property import StringProperty, IntegerProperty, BooleanProperty
from boto.sdb.db.property import DateTimeProperty, FloatProperty, ReferenceProperty
from boto.sdb.db.property import PasswordProperty, ListProperty, MapProperty
from boto.exception import SDBPersistenceError
# Verbose logging for this manual test run.
logging.basicConfig()
log = logging.getLogger('test_db')
log.setLevel(logging.DEBUG)

# Registry of every object created by the tests, keyed by a descriptive
# name, so created records remain reachable after each test returns.
_objects = {}
#
# This will eventually be moved to the boto.tests module and become a real unit test
# but for now it will live here. It shows examples of each of the Property types in
# use and tests the basic operations.
#
class TestBasic(Model):
    """Model exercising the basic scalar property types."""
    name = StringProperty()
    size = IntegerProperty()
    foo = BooleanProperty()
    date = DateTimeProperty()
class TestFloat(Model):
    """Model exercising FloatProperty."""
    name = StringProperty()
    value = FloatProperty()
class TestRequired(Model):
    """Model with a required property that carries a default value."""
    req = StringProperty(required=True, default='foo')
class TestReference(Model):
    """Model holding a reference to a TestBasic instance (reverse name: 'refs')."""
    ref = ReferenceProperty(reference_class=TestBasic, collection_name='refs')
class TestSubClass(TestBasic):
    """Subclass of TestBasic adding an extra integer property."""
    answer = IntegerProperty()
class TestPassword(Model):
    """Model exercising PasswordProperty."""
    password = PasswordProperty()
class TestList(Model):
    """Model with a typed ListProperty of ints."""
    name = StringProperty()
    nums = ListProperty(int)
class TestMap(Model):
    """Model with a free-form MapProperty (not exercised by the tests below)."""
    name = StringProperty()
    map = MapProperty()
class TestListReference(Model):
    """Model with a ListProperty of TestBasic references."""
    name = StringProperty()
    basics = ListProperty(TestBasic)
class TestAutoNow(Model):
    """Model with auto-populated creation/modification timestamps."""
    create_date = DateTimeProperty(auto_now_add=True)
    modified_date = DateTimeProperty(auto_now=True)
class TestUnique(Model):
    """Model whose name must be unique across instances."""
    name = StringProperty(unique=True)
def test_basic():
    """Round-trip a TestBasic object through save and retrieval.

    Returns the saved object so dependent tests can reuse it.
    """
    global _objects
    obj = TestBasic()
    obj.name = 'simple'
    obj.size = -42
    obj.foo = True
    obj.date = datetime.now()
    log.debug('saving object')
    obj.put()
    _objects['test_basic_t'] = obj
    # Give the backing store time to settle before reading the object back.
    time.sleep(5)
    log.debug('now try retrieving it')
    fetched = TestBasic.get_by_id(obj.id)
    _objects['test_basic_tt'] = fetched
    assert fetched.id == obj.id
    # Looking up a list of ids must return a matching list of objects.
    batch = TestBasic.get_by_id([obj.id])
    assert len(batch) == 1
    assert batch[0].id == obj.id
    assert obj.size == fetched.size
    assert obj.foo == fetched.foo
    assert obj.name == fetched.name
    #assert obj.date == fetched.date
    return obj
def test_float():
    """Round-trip a FloatProperty value and verify it survives unchanged."""
    global _objects
    obj = TestFloat()
    obj.name = 'float object'
    obj.value = 98.6
    log.debug('saving object')
    obj.save()
    _objects['test_float_t'] = obj
    # Wait before re-fetching so the stored value is visible.
    time.sleep(5)
    log.debug('now try retrieving it')
    fetched = TestFloat.get_by_id(obj.id)
    _objects['test_float_tt'] = fetched
    assert fetched.id == obj.id
    assert fetched.name == obj.name
    assert fetched.value == obj.value
    return obj
def test_required():
    """Persist a model whose required property falls back to its default."""
    global _objects
    obj = TestRequired()
    _objects['test_required_t'] = obj
    obj.put()
    return obj
def test_reference(t=None):
    """Verify that a ReferenceProperty resolves back to its target object.

    :param t: optional TestBasic instance to reference; created on demand.
    """
    global _objects
    if not t:
        t = test_basic()
    holder = TestReference()
    holder.ref = t
    holder.put()
    # Allow the write to propagate before re-fetching.
    time.sleep(10)
    holder = TestReference.get_by_id(holder.id)
    _objects['test_reference_tt'] = holder
    assert holder.ref.id == t.id
    # The reverse collection ('refs') on the referenced object must be iterable.
    for item in t.refs:
        log.debug(item)
def test_subclass():
    """Save an instance of a subclassed model with inherited properties."""
    global _objects
    obj = TestSubClass()
    _objects['test_subclass_t'] = obj
    obj.name = 'a subclass'
    obj.size = -489
    obj.save()
def test_password():
    """Passwords must compare equal to the clear text but not be stored as it."""
    global _objects
    obj = TestPassword()
    _objects['test_password_t'] = obj
    obj.password = "foo"
    obj.save()
    time.sleep(5)
    # Re-fetch to make sure it stored ok.
    fetched = TestPassword.get_by_id(obj.id)
    _objects['test_password_tt'] = fetched
    # Equality is defined against the clear text...
    assert fetched.password == "foo"
    # ...but the stored representation is not the clear text itself.
    assert str(fetched.password) != "foo"
def test_list():
    """A ListProperty of ints must round-trip with element types intact."""
    global _objects
    obj = TestList()
    _objects['test_list_t'] = obj
    obj.name = 'a list of ints'
    obj.nums = [1, 2, 3, 4, 5]
    obj.put()
    fetched = TestList.get_by_id(obj.id)
    _objects['test_list_tt'] = fetched
    assert fetched.name == obj.name
    for num in fetched.nums:
        assert isinstance(num, int)
def test_list_reference():
    """A ListProperty of model references must resolve back to the originals."""
    global _objects
    target = TestBasic()
    target.put()
    _objects['test_list_ref_t'] = target
    holder = TestListReference()
    holder.name = "foo"
    holder.basics = [target]
    holder.put()
    # Allow the write to propagate before re-fetching.
    time.sleep(5)
    _objects['test_list_ref_tt'] = holder
    fetched = TestListReference.get_by_id(holder.id)
    assert fetched.basics[0].id == target.id
def test_unique():
    """Storing two objects with the same unique name must fail."""
    global _objects
    first = TestUnique()
    # Time-based suffix keeps repeated runs from colliding with old data.
    name = 'foo' + str(int(time.time()))
    first.name = name
    first.put()
    _objects['test_unique_t'] = first
    time.sleep(10)
    duplicate = TestUnique()
    _objects['test_unique_tt'] = duplicate
    duplicate.name = name
    try:
        duplicate.put()
        assert False
    except SDBPersistenceError:
        pass
def test_datetime():
    """auto_now_add timestamps must survive a save/fetch round trip."""
    global _objects
    obj = TestAutoNow()
    obj.put()
    _objects['test_datetime_t'] = obj
    time.sleep(5)
    fetched = TestAutoNow.get_by_id(obj.id)
    # Compare at time-tuple granularity; sub-second precision may differ.
    assert fetched.create_date.timetuple() == obj.create_date.timetuple()
def test():
    """Run every individual test in dependency order, logging each stage."""
    log.info('test_basic')
    basic = test_basic()
    log.info('test_required')
    test_required()
    log.info('test_reference')
    test_reference(basic)
    log.info('test_subclass')
    test_subclass()
    log.info('test_password')
    test_password()
    log.info('test_list')
    test_list()
    log.info('test_list_reference')
    test_list_reference()
    log.info("test_datetime")
    test_datetime()
    log.info('test_unique')
    test_unique()
# Run the whole suite when this module is executed directly.
if __name__ == "__main__":
    test()
| mit |
shumik/skencil-c | Sketch/UI/blenddlg.py | 1 | 5136 | # Sketch - A Python-based interactive drawing program
# Copyright (C) 1997, 1998, 1999 by Bernhard Herzog
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import sketchdlg
from Tkinter import Frame, Label, IntVar
from Tkinter import RIGHT, BOTTOM, X, Y, BOTH, LEFT, TOP, GROOVE, E,\
DISABLED, NORMAL
from tkext import UpdatedButton, MyEntry
from miniscroll import MiniScroller
from sketchdlg import PropertyPanel
from Sketch.const import SELECTION
from Sketch.Graphics.blendgroup import BlendGroup, BlendInterpolation, \
SelectStart, SelectEnd
from Sketch import _, config
class BlendPanel(PropertyPanel):
    """Property panel for creating blend groups and editing their step count.

    Offers an Apply button (create a blend or change the number of
    interpolation steps), a steps spinner, and buttons to select the
    start/end control objects of the current blend.
    """

    title = _("Blend")

    def __init__(self, master, main_window, doc):
        PropertyPanel.__init__(self, master, main_window, doc,
                               name = 'blenddlg')

    def build_dlg(self):
        # Construct the panel's widgets (called once by the dialog framework).
        top = self.top
        button_frame = Frame(top)
        button_frame.pack(side = BOTTOM, fill = BOTH, expand = 1)
        self.update_buttons = []
        # Apply is only sensitive while the document can actually blend.
        button = UpdatedButton(top, text = _("Apply"),
                               command = self.apply_blend,
                               sensitivecb = self.doc_can_blend)
        button.pack(in_ = button_frame, side = LEFT, expand = 1, fill = X)
        self.Subscribe(SELECTION, button.Update)
        button = UpdatedButton(top, text = _("Close"),
                               command = self.close_dlg)
        button.pack(in_ = button_frame, side = RIGHT, expand = 1, fill = X)
        # Steps entry with a mini scroller (minimum of 2 steps).
        steps_frame = Frame(top, relief = GROOVE, bd = 2)
        steps_frame.pack(side = TOP, fill = X, expand = 1)
        label = Label(steps_frame, text = _("Steps"))
        label.pack(side = LEFT, anchor = E)
        self.var_steps = IntVar(top)
        self.var_steps.set(config.preferences.blend_panel_default_steps)
        self.entry = MyEntry(steps_frame, name = 'steps',
                             textvariable = self.var_steps,
                             command = self.apply_blend)
        self.entry.pack(side = LEFT, expand = 1, fill = X, anchor = E)
        scroll = MiniScroller(steps_frame, variable = self.var_steps,
                              min = 2, max = None, step = 1)
        scroll.pack(side = LEFT, fill = Y)
        # Buttons to select the start/end control objects of the blend.
        button = UpdatedButton(top, text = _("Select Start"),
                               sensitivecb = self.can_select,
                               command = self.select_control,
                               args = SelectStart)
        button.pack(side = BOTTOM, fill = X, expand = 1)
        self.Subscribe(SELECTION, button.Update)
        button = UpdatedButton(top, text = _("Select End"),
                               sensitivecb = self.can_select,
                               command = self.select_control,
                               args = SelectEnd)
        button.pack(side = BOTTOM, fill = X, expand = 1)
        self.Subscribe(SELECTION, button.Update)

    def doc_can_blend(self):
        # Blending needs either a blendable selection or an existing blend,
        # and at least two interpolation steps.
        return ((self.document.CanBlend() or self.current_obj_is_blend())
                and self.var_steps.get() >= 2)

    def current_obj_is_blend(self):
        # True if the current object is a blend interpolation, or a blend
        # group with exactly 3 children (start, interpolation, end).
        object = self.document.CurrentObject()
        return (object is not None
                and (object.is_BlendInterpolation
                     or (object.is_Blend and object.NumObjects() == 3)))

    def current_object(self):
        # assume current_obj_is_blend() yields true
        object = self.document.CurrentObject()
        if object.is_Blend:
            # XXX reaching into object.objects is ugly
            object = object.objects[1]
        return object

    def init_from_doc(self):
        # Sync widget state with the newly attached document.
        self.Update()
        self.issue(SELECTION)

    def Update(self):
        # Refresh the steps entry from the current blend (if any) and
        # enable/disable it according to whether blending is possible.
        if self.current_obj_is_blend():
            steps = self.current_object().Steps()
            self.var_steps.set(steps)
        if self.doc_can_blend():
            self.entry['state'] = NORMAL
        else:
            self.entry['state'] = DISABLED

    def apply_blend(self, *args):
        # Either update the step count of an existing blend (undoably,
        # inside a transaction) or create a new blend from the selection.
        steps = self.var_steps.get()
        if self.current_obj_is_blend() and steps >= 2:
            doc = self.document
            doc.BeginTransaction(_("Set %d Blend Steps") % steps)
            try:
                try:
                    doc.AddUndo(self.current_object().SetParameters(steps))
                except:
                    doc.AbortTransaction()
            finally:
                doc.EndTransaction()
        else:
            self.document.Blend(steps)

    def can_select(self):
        # Start/end selection works when the current object is a blend or
        # a direct child of one.
        object = self.document.CurrentObject()
        return (object is not None
                and (object.parent.is_Blend or object.is_Blend))

    def select_control(self, which):
        # Select the start or end control object of the current blend;
        # 'which' is SelectStart or SelectEnd.
        object = self.document.CurrentObject()
        if object is not None:
            if object.is_Blend:
                # XXX reaching into object.objects is ugly
                if which == SelectStart:
                    child = object.objects[0]
                else:
                    child = object.objects[-1]
                self.document.SelectObject(child)
            elif object.parent.is_Blend:
                object.parent.SelectControl(object, which)
| gpl-2.0 |
pwoodworth/intellij-community | python/lib/Lib/site-packages/django/forms/extras/widgets.py | 73 | 4447 | """
Extra HTML Widget classes
"""
import time
import datetime
import re
from django.forms.widgets import Widget, Select
from django.utils import datetime_safe
from django.utils.dates import MONTHS
from django.utils.safestring import mark_safe
from django.utils.formats import get_format
from django.conf import settings
__all__ = ('SelectDateWidget',)
RE_DATE = re.compile(r'(\d{4})-(\d\d?)-(\d\d?)$')
class SelectDateWidget(Widget):
    """
    A Widget that splits date input into three <select> boxes.

    This also serves as an example of a Widget that has more than one HTML
    element and hence implements value_from_datadict.
    """
    # Placeholder entry used when the field is not required.
    none_value = (0, '---')
    # Name templates for the three sub-fields (filled with the field name).
    month_field = '%s_month'
    day_field = '%s_day'
    year_field = '%s_year'

    def __init__(self, attrs=None, years=None, required=True):
        # years is an optional list/tuple of years to use in the "year" select box.
        self.attrs = attrs or {}
        self.required = required
        if years:
            self.years = years
        else:
            # Default to the current year plus the following nine.
            this_year = datetime.date.today().year
            self.years = range(this_year, this_year+10)

    def render(self, name, value, attrs=None):
        # 'value' may be a date-like object or a string; extract the three
        # components, leaving them as None if parsing fails.
        try:
            year_val, month_val, day_val = value.year, value.month, value.day
        except AttributeError:
            year_val = month_val = day_val = None
            if isinstance(value, basestring):
                if settings.USE_L10N:
                    try:
                        input_format = get_format('DATE_INPUT_FORMATS')[0]
                        # Python 2.4 compatibility:
                        # v = datetime.datetime.strptime(value, input_format)
                        # would be clearer, but datetime.strptime was added in
                        # Python 2.5
                        v = datetime.datetime(*(time.strptime(value, input_format)[0:6]))
                        year_val, month_val, day_val = v.year, v.month, v.day
                    except ValueError:
                        pass
                else:
                    match = RE_DATE.match(value)
                    if match:
                        year_val, month_val, day_val = [int(v) for v in match.groups()]
        choices = [(i, i) for i in self.years]
        year_html = self.create_select(name, self.year_field, value, year_val, choices)
        choices = MONTHS.items()
        month_html = self.create_select(name, self.month_field, value, month_val, choices)
        choices = [(i, i) for i in range(1, 32)]
        day_html = self.create_select(name, self.day_field, value, day_val, choices)
        # Order the three selects according to the active DATE_FORMAT
        # (e.g. 'd/m/Y' produces day, month, year); '\\' escapes a format
        # character, characters with no meaning here are skipped.
        format = get_format('DATE_FORMAT')
        escaped = False
        output = []
        for char in format:
            if escaped:
                escaped = False
            elif char == '\\':
                escaped = True
            elif char in 'Yy':
                output.append(year_html)
            elif char in 'bEFMmNn':
                output.append(month_html)
            elif char in 'dj':
                output.append(day_html)
        return mark_safe(u'\n'.join(output))

    def id_for_label(self, id_):
        # The label points at the first rendered sub-widget (the month box).
        return '%s_month' % id_
    id_for_label = classmethod(id_for_label)

    def value_from_datadict(self, data, files, name):
        # Recombine the three submitted sub-fields into a single value.
        y = data.get(self.year_field % name)
        m = data.get(self.month_field % name)
        d = data.get(self.day_field % name)
        if y == m == d == "0":
            # All three set to the '---' placeholder: treat as empty.
            return None
        if y and m and d:
            if settings.USE_L10N:
                # Re-serialize using the localized input format so the
                # field's own parsing accepts it.
                input_format = get_format('DATE_INPUT_FORMATS')[0]
                try:
                    date_value = datetime.date(int(y), int(m), int(d))
                except ValueError:
                    # Invalid combination (e.g. Feb 31): fall through and
                    # return the raw submitted value below.
                    pass
                else:
                    date_value = datetime_safe.new_date(date_value)
                    return date_value.strftime(input_format)
            else:
                return '%s-%s-%s' % (y, m, d)
        return data.get(name, None)

    def create_select(self, name, field, value, val, choices):
        # Render one of the three <select> boxes.
        if 'id' in self.attrs:
            id_ = self.attrs['id']
        else:
            id_ = 'id_%s' % name
        if not (self.required and val):
            choices.insert(0, self.none_value)
        local_attrs = self.build_attrs(id=field % id_)
        s = Select(choices=choices)
        select_html = s.render(field % name, val, local_attrs)
        return select_html
| apache-2.0 |
likaiwalkman/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/checkout/scm/svn.py | 119 | 17277 | # Copyright (c) 2009, 2010, 2011 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import random
import re
import shutil
import string
import sys
import tempfile
from webkitpy.common.memoized import memoized
from webkitpy.common.system.executive import Executive, ScriptError
from .scm import AuthenticationError, SCM, commit_error_handler
_log = logging.getLogger(__name__)
# A mixin class that represents common functionality for SVN and Git-SVN.
class SVNRepository(object):
    """Mixin with functionality shared by the SVN and Git-SVN SCM classes."""

    # FIXME: These belong in common.config.urls
    svn_server_host = "svn.webkit.org"
    svn_server_realm = "<http://svn.webkit.org:80> Mac OS Forge"

    def has_authorization_for_realm(self, realm, home_directory=os.getenv("HOME")):
        # NOTE(review): the home_directory default is evaluated once at import
        # time; callers needing the current $HOME should pass it explicitly.
        # If we are working on a file:// repository realm will be None
        if realm is None:
            return True
        # ignore false positives for methods implemented in the mixee class. pylint: disable=E1101
        # Assumes find and grep are installed.
        if not os.path.isdir(os.path.join(home_directory, ".subversion")):
            return False
        # Find the cached credential file for this realm, if any.
        find_args = ["find", ".subversion", "-type", "f", "-exec", "grep", "-q", realm, "{}", ";", "-print"]
        find_output = self.run(find_args, cwd=home_directory, error_handler=Executive.ignore_error).rstrip()
        if not find_output or not os.path.isfile(os.path.join(home_directory, find_output)):
            return False
        # Subversion either stores the password in the credential file, indicated by the presence of the key "password",
        # or uses the system password store (e.g. Keychain on Mac OS X) as indicated by the presence of the key "passtype".
        # We assume that these keys will not coincide with the actual credential data (e.g. that a person's username
        # isn't "password") so that we can use grep.
        if self.run(["grep", "password", find_output], cwd=home_directory, return_exit_code=True) == 0:
            return True
        return self.run(["grep", "passtype", find_output], cwd=home_directory, return_exit_code=True) == 0
class SVN(SCM, SVNRepository):
executable_name = "svn"
_svn_metadata_files = frozenset(['.svn', '_svn'])
def __init__(self, cwd, patch_directories, **kwargs):
SCM.__init__(self, cwd, **kwargs)
self._bogus_dir = None
if patch_directories == []:
raise Exception(message='Empty list of patch directories passed to SCM.__init__')
elif patch_directories == None:
self._patch_directories = [self._filesystem.relpath(cwd, self.checkout_root)]
else:
self._patch_directories = patch_directories
@classmethod
def in_working_directory(cls, path, executive=None):
if os.path.isdir(os.path.join(path, '.svn')):
# This is a fast shortcut for svn info that is usually correct for SVN < 1.7,
# but doesn't work for SVN >= 1.7.
return True
executive = executive or Executive()
svn_info_args = [cls.executable_name, 'info']
exit_code = executive.run_command(svn_info_args, cwd=path, return_exit_code=True)
return (exit_code == 0)
def find_uuid(self, path):
if not self.in_working_directory(path):
return None
return self.value_from_svn_info(path, 'Repository UUID')
@classmethod
def value_from_svn_info(cls, path, field_name):
svn_info_args = [cls.executable_name, 'info']
# FIXME: This method should use a passed in executive or be made an instance method and use self._executive.
info_output = Executive().run_command(svn_info_args, cwd=path).rstrip()
match = re.search("^%s: (?P<value>.+)$" % field_name, info_output, re.MULTILINE)
if not match:
raise ScriptError(script_args=svn_info_args, message='svn info did not contain a %s.' % field_name)
return match.group('value').rstrip('\r')
def find_checkout_root(self, path):
uuid = self.find_uuid(path)
# If |path| is not in a working directory, we're supposed to return |path|.
if not uuid:
return path
# Search up the directory hierarchy until we find a different UUID.
last_path = None
while True:
if uuid != self.find_uuid(path):
return last_path
last_path = path
(path, last_component) = self._filesystem.split(path)
if last_path == path:
return None
@staticmethod
def commit_success_regexp():
return "^Committed revision (?P<svn_revision>\d+)\.$"
def _run_svn(self, args, **kwargs):
return self.run([self.executable_name] + args, **kwargs)
@memoized
def svn_version(self):
return self._run_svn(['--version', '--quiet'])
def has_working_directory_changes(self):
# FIXME: What about files which are not committed yet?
return self._run_svn(["diff"], cwd=self.checkout_root, decode_output=False) != ""
def discard_working_directory_changes(self):
# Make sure there are no locks lying around from a previously aborted svn invocation.
# This is slightly dangerous, as it's possible the user is running another svn process
# on this checkout at the same time. However, it's much more likely that we're running
# under windows and svn just sucks (or the user interrupted svn and it failed to clean up).
self._run_svn(["cleanup"], cwd=self.checkout_root)
# svn revert -R is not as awesome as git reset --hard.
# It will leave added files around, causing later svn update
# calls to fail on the bots. We make this mirror git reset --hard
# by deleting any added files as well.
added_files = reversed(sorted(self.added_files()))
# added_files() returns directories for SVN, we walk the files in reverse path
# length order so that we remove files before we try to remove the directories.
self._run_svn(["revert", "-R", "."], cwd=self.checkout_root)
for path in added_files:
# This is robust against cwd != self.checkout_root
absolute_path = self.absolute_path(path)
# Completely lame that there is no easy way to remove both types with one call.
if os.path.isdir(path):
os.rmdir(absolute_path)
else:
os.remove(absolute_path)
def status_command(self):
return [self.executable_name, 'status']
def _status_regexp(self, expected_types):
field_count = 6 if self.svn_version() > "1.6" else 5
return "^(?P<status>[%s]).{%s} (?P<filename>.+)$" % (expected_types, field_count)
def _add_parent_directories(self, path):
"""Does 'svn add' to the path and its parents."""
if self.in_working_directory(path):
return
self.add(path)
def add_list(self, paths):
for path in paths:
self._add_parent_directories(os.path.dirname(os.path.abspath(path)))
if self.svn_version() >= "1.7":
# For subversion client 1.7 and later, need to add '--parents' option to ensure intermediate directories
# are added; in addition, 1.7 returns an exit code of 1 from svn add if one or more of the requested
# adds are already under version control, including intermediate directories subject to addition
# due to --parents
svn_add_args = ['svn', 'add', '--parents'] + paths
exit_code = self.run(svn_add_args, return_exit_code=True)
if exit_code and exit_code != 1:
raise ScriptError(script_args=svn_add_args, exit_code=exit_code)
else:
self._run_svn(["add"] + paths)
def _delete_parent_directories(self, path):
if not self.in_working_directory(path):
return
if set(os.listdir(path)) - self._svn_metadata_files:
return # Directory has non-trivial files in it.
self.delete(path)
def delete_list(self, paths):
for path in paths:
abs_path = os.path.abspath(path)
parent, base = os.path.split(abs_path)
result = self._run_svn(["delete", "--force", base], cwd=parent)
self._delete_parent_directories(os.path.dirname(abs_path))
return result
def exists(self, path):
return not self._run_svn(["info", path], return_exit_code=True, decode_output=False)
def changed_files(self, git_commit=None):
status_command = [self.executable_name, "status"]
status_command.extend(self._patch_directories)
# ACDMR: Addded, Conflicted, Deleted, Modified or Replaced
return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
def changed_files_for_revision(self, revision):
# As far as I can tell svn diff --summarize output looks just like svn status output.
# No file contents printed, thus utf-8 auto-decoding in self.run is fine.
status_command = [self.executable_name, "diff", "--summarize", "-c", revision]
return self.run_status_and_extract_filenames(status_command, self._status_regexp("ACDMR"))
def revisions_changing_file(self, path, limit=5):
revisions = []
# svn log will exit(1) (and thus self.run will raise) if the path does not exist.
log_command = ['log', '--quiet', '--limit=%s' % limit, path]
for line in self._run_svn(log_command, cwd=self.checkout_root).splitlines():
match = re.search('^r(?P<revision>\d+) ', line)
if not match:
continue
revisions.append(int(match.group('revision')))
return revisions
def conflicted_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("C"))
def added_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("A"))
def deleted_files(self):
return self.run_status_and_extract_filenames(self.status_command(), self._status_regexp("D"))
@staticmethod
def supports_local_commits():
return False
def display_name(self):
return "svn"
def svn_revision(self, path):
return self.value_from_svn_info(path, 'Revision')
def timestamp_of_revision(self, path, revision):
# We use --xml to get timestamps like 2013-02-08T08:18:04.964409Z
repository_root = self.value_from_svn_info(self.checkout_root, 'Repository Root')
info_output = Executive().run_command([self.executable_name, 'log', '-r', revision, '--xml', repository_root], cwd=path).rstrip()
match = re.search(r"^<date>(?P<value>.+)</date>\r?$", info_output, re.MULTILINE)
return match.group('value')
# FIXME: This method should be on Checkout.
def create_patch(self, git_commit=None, changed_files=None):
"""Returns a byte array (str()) representing the patch file.
Patch files are effectively binary since they may contain
files of multiple different encodings."""
if changed_files == []:
return ""
elif changed_files == None:
changed_files = []
return self.run([self.script_path("svn-create-patch")] + changed_files,
cwd=self.checkout_root, return_stderr=False,
decode_output=False)
def committer_email_for_revision(self, revision):
return self._run_svn(["propget", "svn:author", "--revprop", "-r", revision]).rstrip()
def contents_at_revision(self, path, revision):
"""Returns a byte array (str()) containing the contents
of path @ revision in the repository."""
remote_path = "%s/%s" % (self._repository_url(), path)
return self._run_svn(["cat", "-r", revision, remote_path], decode_output=False)
def diff_for_revision(self, revision):
# FIXME: This should probably use cwd=self.checkout_root
return self._run_svn(['diff', '-c', revision])
def _bogus_dir_name(self):
rnd = ''.join(random.sample(string.ascii_letters, 5))
if sys.platform.startswith("win"):
parent_dir = tempfile.gettempdir()
else:
parent_dir = sys.path[0] # tempdir is not secure.
return os.path.join(parent_dir, "temp_svn_config_" + rnd)
def _setup_bogus_dir(self, log):
self._bogus_dir = self._bogus_dir_name()
if not os.path.exists(self._bogus_dir):
os.mkdir(self._bogus_dir)
self._delete_bogus_dir = True
else:
self._delete_bogus_dir = False
if log:
log.debug(' Html: temp config dir: "%s".', self._bogus_dir)
def _teardown_bogus_dir(self, log):
if self._delete_bogus_dir:
shutil.rmtree(self._bogus_dir, True)
if log:
log.debug(' Html: removed temp config dir: "%s".', self._bogus_dir)
self._bogus_dir = None
def diff_for_file(self, path, log=None):
self._setup_bogus_dir(log)
try:
args = ['diff']
if self._bogus_dir:
args += ['--config-dir', self._bogus_dir]
args.append(path)
return self._run_svn(args, cwd=self.checkout_root)
finally:
self._teardown_bogus_dir(log)
def show_head(self, path):
return self._run_svn(['cat', '-r', 'BASE', path], decode_output=False)
def _repository_url(self):
return self.value_from_svn_info(self.checkout_root, 'URL')
def apply_reverse_diff(self, revision):
# '-c -revision' applies the inverse diff of 'revision'
svn_merge_args = ['merge', '--non-interactive', '-c', '-%s' % revision, self._repository_url()]
_log.warning("svn merge has been known to take more than 10 minutes to complete. It is recommended you use git for rollouts.")
_log.debug("Running 'svn %s'" % " ".join(svn_merge_args))
# FIXME: Should this use cwd=self.checkout_root?
self._run_svn(svn_merge_args)
    def revert_files(self, file_paths):
        """Discard local modifications to the given paths ('svn revert')."""
        # FIXME: This should probably use cwd=self.checkout_root.
        self._run_svn(['revert'] + file_paths)
    def commit_with_message(self, message, username=None, password=None, git_commit=None, force_squash=False, changed_files=None):
        """Commit local changes with the given log message.

        git_commit and force_squash exist for API parity with the git backend
        and are ignored here.  password is accepted for the same reason but is
        not passed to svn here -- presumably svn uses its own credential cache;
        TODO confirm.  Raises AuthenticationError when no username is given
        and no cached credentials exist for the server realm.
        """
        # git-commit and force are not used by SVN.
        svn_commit_args = ["commit"]
        if not username and not self.has_authorization_for_realm(self.svn_server_realm):
            raise AuthenticationError(self.svn_server_host)
        if username:
            svn_commit_args.extend(["--username", username])
        svn_commit_args.extend(["-m", message])
        if changed_files:
            # Restrict the commit to an explicit list of paths.
            svn_commit_args.extend(changed_files)
        return self._run_svn(svn_commit_args, cwd=self.checkout_root, error_handler=commit_error_handler)
    def svn_commit_log(self, svn_revision):
        """Return the svn log entry for a single revision (accepts 'r12345' or '12345')."""
        svn_revision = self.strip_r_from_svn_revision(svn_revision)
        return self._run_svn(['log', '--non-interactive', '--revision', svn_revision])
    def last_svn_commit_log(self):
        """Return the log entry for the revision this checkout is at."""
        # BASE is the checkout revision, HEAD is the remote repository revision
        # http://svnbook.red-bean.com/en/1.0/ch03s03.html
        return self.svn_commit_log('BASE')
    def svn_blame(self, path):
        """Return per-line authorship output ('svn blame') for path."""
        return self._run_svn(['blame', path])
def propset(self, pname, pvalue, path):
dir, base = os.path.split(path)
return self._run_svn(['pset', pname, pvalue, base], cwd=dir)
def propget(self, pname, path):
dir, base = os.path.split(path)
return self._run_svn(['pget', pname, base], cwd=dir).encode('utf-8').rstrip("\n")
| bsd-3-clause |
VitalPet/odoo | addons/hr_payroll_account/wizard/hr_payroll_payslips_by_employees.py | 52 | 1762 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class hr_payslip_employees(osv.osv_memory):
    """Extends the payslip-batch wizard to propagate the run's journal.

    Before delegating to the base compute_sheet, the accounting journal of
    the active payslip run is copied into the context so the generated
    payslips are booked against the correct journal.
    """
    _inherit ='hr.payslip.employees'
    def compute_sheet(self, cr, uid, ids, context=None):
        """Compute payslips, injecting journal_id from the active payslip run."""
        run_pool = self.pool.get('hr.payslip.run')
        if context is None:
            context = {}
        if context and context.get('active_id', False):
            # active_id is the hr.payslip.run record the wizard was launched from.
            run_data = run_pool.read(cr, uid, context['active_id'], ['journal_id'])
            journal_id = run_data.get('journal_id', False)
            # many2one reads as (id, name); keep only the id.
            journal_id = journal_id and journal_id[0] or False
            if journal_id: context.update({'journal_id': journal_id})
        return super(hr_payslip_employees, self).compute_sheet(cr, uid, ids, context=context)
hr_payslip_employees()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sxpert/ansible-modules-core | files/stat.py | 94 | 13076 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: stat
version_added: "1.3"
short_description: retrieve file or file system status
description:
- Retrieves facts for a file similar to the linux/unix 'stat' command.
options:
path:
description:
- The full path of the file/object to get the facts of
required: true
default: null
aliases: []
follow:
description:
- Whether to follow symlinks
required: false
default: no
aliases: []
get_md5:
description:
- Whether to return the md5 sum of the file. Will return None if we're unable to use md5 (Common for FIPS-140 compliant systems)
required: false
default: yes
aliases: []
get_checksum:
description:
- Whether to return a checksum of the file (currently sha1)
required: false
default: yes
aliases: []
version_added: "1.8"
author: "Bruce Pennypacker (@bpennypacker)"
'''
EXAMPLES = '''
# Obtain the stats of /etc/foo.conf, and check that the file still belongs
# to 'root'. Fail otherwise.
- stat: path=/etc/foo.conf
register: st
- fail: msg="Whoops! file ownership has changed"
when: st.stat.pw_name != 'root'
# Determine if a path exists and is a symlink. Note that if the path does
# not exist, and we test sym.stat.islnk, it will fail with an error. So
# therefore, we must test whether it is defined.
# Run this to understand the structure, the skipped ones do not pass the
# check performed by 'when'
- stat: path=/path/to/something
register: sym
- debug: msg="islnk isn't defined (path doesn't exist)"
when: sym.stat.islnk is not defined
- debug: msg="islnk is defined (path must exist)"
when: sym.stat.islnk is defined
- debug: msg="Path exists and is a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk
- debug: msg="Path exists and isn't a symlink"
when: sym.stat.islnk is defined and sym.stat.islnk == False
# Determine if a path exists and is a directory. Note that we need to test
# both that p.stat.isdir actually exists, and also that it's set to true.
- stat: path=/path/to/something
register: p
- debug: msg="Path exists and is a directory"
when: p.stat.isdir is defined and p.stat.isdir
# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no
'''
RETURN = '''
stat:
description: dictionary containing all the stat data
returned: success
type: dictionary
contains:
exists:
description: if the destination path actually exists or not
returned: success
type: boolean
sample: True
path:
description: The full path of the file/object to get the facts of
returned: success and if path exists
type: boolean
sample: '/path/to/file'
mode:
description: Unix permissions of the file in octal
returned: success, path exists and user can read stats
type: octal
sample: 1755
isdir:
description: Tells you if the path is a directory
returned: success, path exists and user can read stats
type: boolean
sample: False
ischr:
description: Tells you if the path is a character device
returned: success, path exists and user can read stats
type: boolean
sample: False
isblk:
description: Tells you if the path is a block device
returned: success, path exists and user can read stats
type: boolean
sample: False
isreg:
description: Tells you if the path is a regular file
returned: success, path exists and user can read stats
type: boolean
sample: True
isfifo:
description: Tells you if the path is a named pipe
returned: success, path exists and user can read stats
type: boolean
sample: False
islnk:
description: Tells you if the path is a symbolic link
returned: success, path exists and user can read stats
type: boolean
sample: False
issock:
description: Tells you if the path is a unix domain socket
returned: success, path exists and user can read stats
type: boolean
sample: False
uid:
description: Numeric id representing the file owner
returned: success, path exists and user can read stats
type: int
sample: 1003
gid:
description: Numeric id representing the group of the owner
returned: success, path exists and user can read stats
type: int
sample: 1003
size:
description: Size in bytes for a plain file, ammount of data for some special files
returned: success, path exists and user can read stats
type: int
sample: 203
inode:
description: Inode number of the path
returned: success, path exists and user can read stats
type: int
sample: 12758
dev:
description: Device the inode resides on
returned: success, path exists and user can read stats
type: int
sample: 33
nlink:
description: Number of links to the inode (hard links)
returned: success, path exists and user can read stats
type: int
sample: 1
atime:
description: Time of last access
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
mtime:
description: Time of last modification
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
ctime:
description: Time of last metadata update or creation (depends on OS)
returned: success, path exists and user can read stats
type: float
sample: 1424348972.575
wusr:
description: Tells you if the owner has write permission
returned: success, path exists and user can read stats
type: boolean
sample: True
rusr:
description: Tells you if the owner has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xusr:
description: Tells you if the owner has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
wgrp:
description: Tells you if the owner's group has write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
rgrp:
description: Tells you if the owner's group has read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xgrp:
description: Tells you if the owner's group has execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
woth:
description: Tells you if others have write permission
returned: success, path exists and user can read stats
type: boolean
sample: False
roth:
description: Tells you if others have read permission
returned: success, path exists and user can read stats
type: boolean
sample: True
xoth:
description: Tells you if others have execute permission
returned: success, path exists and user can read stats
type: boolean
sample: True
isuid:
description: Tells you if the invoking user's id matches the owner's id
returned: success, path exists and user can read stats
type: boolean
sample: False
isgid:
description: Tells you if the invoking user's group id matches the owner's group id
returned: success, path exists and user can read stats
type: boolean
sample: False
lnk_source:
description: Original path
returned: success, path exists and user can read stats and the path is a symbolic link
type: boolean
sample: True
md5:
description: md5 hash of the path
returned: success, path exists and user can read stats and path supports hashing and md5 is supported
type: string
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
description: hash of the path
returned: success, path exists and user can read stats and path supports hashing
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
description: User name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: httpd
gr_name:
description: Group name of owner
returned: success, path exists and user can read stats and installed python supports it
type: string
sample: www-data
'''
import errno
import grp
import os
import pwd
import sys
from stat import *
def main():
module = AnsibleModule(
argument_spec = dict(
path = dict(required=True),
follow = dict(default='no', type='bool'),
get_md5 = dict(default='yes', type='bool'),
get_checksum = dict(default='yes', type='bool')
),
supports_check_mode = True
)
path = module.params.get('path')
path = os.path.expanduser(path)
follow = module.params.get('follow')
get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
try:
if follow:
st = os.stat(path)
else:
st = os.lstat(path)
except OSError, e:
if e.errno == errno.ENOENT:
d = { 'exists' : False }
module.exit_json(changed=False, stat=d)
module.fail_json(msg = e.strerror)
mode = st.st_mode
# back to ansible
d = {
'exists' : True,
'path' : path,
'mode' : "%04o" % S_IMODE(mode),
'isdir' : S_ISDIR(mode),
'ischr' : S_ISCHR(mode),
'isblk' : S_ISBLK(mode),
'isreg' : S_ISREG(mode),
'isfifo' : S_ISFIFO(mode),
'islnk' : S_ISLNK(mode),
'issock' : S_ISSOCK(mode),
'uid' : st.st_uid,
'gid' : st.st_gid,
'size' : st.st_size,
'inode' : st.st_ino,
'dev' : st.st_dev,
'nlink' : st.st_nlink,
'atime' : st.st_atime,
'mtime' : st.st_mtime,
'ctime' : st.st_ctime,
'wusr' : bool(mode & stat.S_IWUSR),
'rusr' : bool(mode & stat.S_IRUSR),
'xusr' : bool(mode & stat.S_IXUSR),
'wgrp' : bool(mode & stat.S_IWGRP),
'rgrp' : bool(mode & stat.S_IRGRP),
'xgrp' : bool(mode & stat.S_IXGRP),
'woth' : bool(mode & stat.S_IWOTH),
'roth' : bool(mode & stat.S_IROTH),
'xoth' : bool(mode & stat.S_IXOTH),
'isuid' : bool(mode & stat.S_ISUID),
'isgid' : bool(mode & stat.S_ISGID),
}
if S_ISLNK(mode):
d['lnk_source'] = os.path.realpath(path)
if S_ISREG(mode) and get_md5 and os.access(path,os.R_OK):
# Will fail on FIPS-140 compliant systems
try:
d['md5'] = module.md5(path)
except ValueError:
d['md5'] = None
if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK):
d['checksum'] = module.sha1(path)
try:
pw = pwd.getpwuid(st.st_uid)
d['pw_name'] = pw.pw_name
grp_info = grp.getgrgid(st.st_gid)
d['gr_name'] = grp_info.gr_name
except:
pass
module.exit_json(changed=False, stat=d)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
googleinterns/wss | third_party/deeplab/evaluation/parsing_covering_test.py | 4 | 5968 | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Parsing Covering metric."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as np
from deeplab.evaluation import parsing_covering
from deeplab.evaluation import test_utils
# See the definition of the color names at:
# https://en.wikipedia.org/wiki/Web_colors.
_CLASS_COLOR_MAP = {
(0, 0, 0): 0,
(0, 0, 255): 1, # Person (blue).
(255, 0, 0): 2, # Bear (red).
(0, 255, 0): 3, # Tree (lime).
(255, 0, 255): 4, # Bird (fuchsia).
(0, 255, 255): 5, # Sky (aqua).
(255, 255, 0): 6, # Cat (yellow).
}
class CoveringConveringTest(absltest.TestCase):
  """Unit tests for the ParsingCovering metric.

  NOTE(review): the class name looks like a typo for "ParsingCoveringTest";
  left unchanged because runners may select tests by class name.
  """

  def test_perfect_match(self):
    """A prediction identical to the ground truth scores a covering of 1."""
    categories = np.zeros([6, 6], np.uint16)
    instances = np.array([
        [2, 2, 2, 2, 2, 2],
        [2, 4, 4, 4, 4, 2],
        [2, 4, 4, 4, 4, 2],
        [2, 4, 4, 4, 4, 2],
        [2, 4, 4, 2, 2, 2],
        [2, 4, 2, 2, 2, 2],
    ],
                         dtype=np.uint16)
    pc = parsing_covering.ParsingCovering(
        num_categories=3,
        ignored_label=2,
        max_instances_per_category=2,
        offset=16,
        normalize_by_image_size=False)
    pc.compare_and_accumulate(categories, instances, categories, instances)
    np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 21.0, 0.0])
    np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 21.0, 0.0])
    np.testing.assert_array_equal(pc.result_per_category(), [0.0, 1.0, 0.0])
    self.assertEqual(pc.result(), 1.0)

  def test_totally_wrong(self):
    """A prediction disjoint from the ground truth scores 0."""
    categories = np.zeros([6, 6], np.uint16)
    gt_instances = np.array([
        [0, 0, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 1, 1, 1, 1, 0],
        [0, 1, 1, 1, 1, 0],
        [0, 0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0, 0],
    ],
                            dtype=np.uint16)
    # Inverting the mask guarantees zero overlap with the ground truth.
    pred_instances = 1 - gt_instances
    pc = parsing_covering.ParsingCovering(
        num_categories=2,
        ignored_label=0,
        max_instances_per_category=1,
        offset=16,
        normalize_by_image_size=False)
    pc.compare_and_accumulate(categories, gt_instances, categories,
                              pred_instances)
    np.testing.assert_array_equal(pc.weighted_iou_per_class, [0.0, 0.0])
    np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 10.0])
    np.testing.assert_array_equal(pc.result_per_category(), [0.0, 0.0])
    self.assertEqual(pc.result(), 0.0)

  def test_matches_expected(self):
    """Golden-value test against the bundled 'team' example images."""
    pred_classes = test_utils.read_segmentation_with_rgb_color_map(
        'team_pred_class.png', _CLASS_COLOR_MAP)
    pred_instances = test_utils.read_test_image(
        'team_pred_instance.png', mode='L')
    instance_class_map = {
        0: 0,
        47: 1,
        97: 1,
        133: 1,
        150: 1,
        174: 1,
        198: 2,
        215: 1,
        244: 1,
        255: 1,
    }
    gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
        'team_gt_instance.png', instance_class_map)
    pc = parsing_covering.ParsingCovering(
        num_categories=3,
        ignored_label=0,
        max_instances_per_category=256,
        offset=256 * 256,
        normalize_by_image_size=False)
    pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
                              pred_instances)
    np.testing.assert_array_almost_equal(
        pc.weighted_iou_per_class, [0.0, 39864.14634, 3136], decimal=4)
    np.testing.assert_array_equal(pc.gt_area_per_class, [0.0, 56870, 5800])
    np.testing.assert_array_almost_equal(
        pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4)
    self.assertAlmostEqual(pc.result(), 0.6208296732)

  def test_matches_expected_normalize_by_size(self):
    """Same golden test with per-image-size normalization enabled."""
    pred_classes = test_utils.read_segmentation_with_rgb_color_map(
        'team_pred_class.png', _CLASS_COLOR_MAP)
    pred_instances = test_utils.read_test_image(
        'team_pred_instance.png', mode='L')
    instance_class_map = {
        0: 0,
        47: 1,
        97: 1,
        133: 1,
        150: 1,
        174: 1,
        198: 2,
        215: 1,
        244: 1,
        255: 1,
    }
    gt_instances, gt_classes = test_utils.panoptic_segmentation_with_class_map(
        'team_gt_instance.png', instance_class_map)
    pc = parsing_covering.ParsingCovering(
        num_categories=3,
        ignored_label=0,
        max_instances_per_category=256,
        offset=256 * 256,
        normalize_by_image_size=True)
    pc.compare_and_accumulate(gt_classes, gt_instances, pred_classes,
                              pred_instances)
    np.testing.assert_array_almost_equal(
        pc.weighted_iou_per_class, [0.0, 0.5002088756, 0.03935002196],
        decimal=4)
    np.testing.assert_array_almost_equal(
        pc.gt_area_per_class, [0.0, 0.7135955832, 0.07277746408], decimal=4)
    # Note that the per-category and overall PCs are identical to those without
    # normalization in the previous test, because we only have a single image.
    np.testing.assert_array_almost_equal(
        pc.result_per_category(), [0.0, 0.70097, 0.54069], decimal=4)
    self.assertAlmostEqual(pc.result(), 0.6208296732)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
shaftoe/home-assistant | homeassistant/components/image_processing/dlib_face_detect.py | 3 | 2106 | """
Component that will help set the Dlib face detect processing.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/image_processing.dlib_face_detect/
"""
import logging
import io
from homeassistant.core import split_entity_id
# pylint: disable=unused-import
from homeassistant.components.image_processing import PLATFORM_SCHEMA # noqa
from homeassistant.components.image_processing import (
CONF_SOURCE, CONF_ENTITY_ID, CONF_NAME)
from homeassistant.components.image_processing.microsoft_face_identify import (
ImageProcessingFaceEntity)
REQUIREMENTS = ['face_recognition==0.1.14']
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Dlib Face detection platform."""
    entities = [
        DlibFaceDetectEntity(camera[CONF_ENTITY_ID], camera.get(CONF_NAME))
        for camera in config[CONF_SOURCE]
    ]
    add_devices(entities)
class DlibFaceDetectEntity(ImageProcessingFaceEntity):
    """Image-processing entity that detects faces with Dlib."""

    def __init__(self, camera_entity, name=None):
        """Initialize Dlib face entity.

        camera_entity: entity_id of the camera whose snapshots are scanned.
        name: optional friendly name; defaults to "Dlib Face <camera name>".
        """
        super().__init__()
        self._camera = camera_entity
        if name:
            self._name = name
        else:
            self._name = "Dlib Face {0}".format(
                split_entity_id(camera_entity)[1])
    @property
    def camera_entity(self):
        """Return camera entity id from process pictures."""
        return self._camera
    @property
    def name(self):
        """Return the name of the entity."""
        return self._name
    def process_image(self, image):
        """Run face detection over raw image bytes and record the results."""
        # pylint: disable=import-error
        # Imported lazily: face_recognition is heavy and only needed here.
        import face_recognition
        # Wrap the bytes in a named file-like object for load_image_file.
        fak_file = io.BytesIO(image)
        fak_file.name = 'snapshot.jpg'
        fak_file.seek(0)
        image = face_recognition.load_image_file(fak_file)
        face_locations = face_recognition.face_locations(image)
        self.process_faces(face_locations, len(face_locations))
| apache-2.0 |
janusnic/21v-python | unit_19/design/main.py | 1 | 1996 | from PyQt4 import QtGui # Import the PyQt4 module we'll need
import sys # We need sys so that we can pass argv to QApplication
import design # This file holds our MainWindow and all design related things
# it also keeps events etc that we defined in Qt Designer
import os # For listing directory methods
class ExampleApp(QtGui.QMainWindow, design.Ui_MainWindow):
    """Main window: lets the user pick a folder and lists its file names."""

    def __init__(self):
        # Name the class explicitly in super(): the previous
        # super(self.__class__, self) form causes infinite recursion the
        # moment this class is subclassed, because self.__class__ is then
        # the subclass rather than ExampleApp.
        super(ExampleApp, self).__init__()
        self.setupUi(self)  # Defined in design.py; builds layout and widgets.
        self.btnBrowse.clicked.connect(self.browse_folder)  # Run browse_folder on click.

    def browse_folder(self):
        """Ask the user for a directory and list its contents in the widget."""
        self.listWidget.clear()  # In case there are any existing elements in the list
        directory = QtGui.QFileDialog.getExistingDirectory(self,
                                                           "Pick a folder")
        # getExistingDirectory returns '' when the user cancels the dialog.
        if directory:
            for file_name in os.listdir(directory):
                self.listWidget.addItem(file_name)
def main():
    """Create the Qt application, show the main window, and run the event loop."""
    app = QtGui.QApplication(sys.argv)  # A new instance of QApplication
    form = ExampleApp()  # We set the form to be our ExampleApp (design)
    form.show()  # Show the form
    app.exec_()  # Enter the Qt event loop; blocks until the window closes.
if __name__ == '__main__': # if we're running file directly and not importing it
main() # run the main function | mit |
Ozmodian/Wordpress_local | eb/macosx/python3/lib/elasticbeanstalk/exception.py | 10 | 2130 | #!/usr/bin/env python
#==============================================================================
# Copyright 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Amazon Software License (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/asl/
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or
# implied. See the License for the specific language governing permissions
# and limitations under the License.
#==============================================================================
from lib.aws.exception import AwsServiceException
class EBErrorCode(object):
    '''Elastic Beanstalk error code'''
    # String constants mirroring error codes returned by the
    # Elastic Beanstalk service API.
    TooManyApplicationsException = 'TooManyApplicationsException'
    TooManyConfigurationTemplates = 'TooManyConfigurationTemplates'
    OperationInProgress = 'OperationInProgress'
    SourceBundleDeletion = 'SourceBundleDeletion'
    S3LocationNotInServiceRegion = 'S3LocationNotInServiceRegion'
class ElasticBeanstalkException(AwsServiceException):
    """Base class for Elastic Beanstalk service errors.

    Re-wraps an existing AwsServiceException, copying its message,
    error code and HTTP status code.
    """

    def __init__(self, ex):
        # isinstance() is the idiomatic (and virtual-subclass aware)
        # equivalent of the previous issubclass(ex.__class__, ...) check.
        if not isinstance(ex, AwsServiceException):
            raise AttributeError('Must initialize from instance of AwsServiceException subclass.')
        super(ElasticBeanstalkException, self).__init__(ex.message, ex.code, ex.http_code)
# Concrete error types: one subclass per distinguishable service failure so
# callers can catch specific conditions.  All behavior lives in the base class.
class TooManyApplicationsException(ElasticBeanstalkException):
    pass
class TooManyApplicationVersionsException(ElasticBeanstalkException):
    pass
class TooManyEnvironmentsException(ElasticBeanstalkException):
    pass
class AlreadyExistException(ElasticBeanstalkException):
    pass
class OperationInProgressException(ElasticBeanstalkException):
    pass
class ApplicationHasRunningEnvException(ElasticBeanstalkException):
    pass
class SourceBundleDeletionException(ElasticBeanstalkException):
    pass
class S3LocationNotInServiceRegionException(ElasticBeanstalkException):
    pass
| gpl-2.0 |
sjagoe/github-jenkins | github_jenkins/settings.py | 1 | 7007 | # Django settings for github_jenkins project.
import os
import social_auth
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(os.path.dirname(__file__), 'db.sqlite3'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
# 'USER': '',
# 'PASSWORD': '',
# 'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
# 'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/github-jenkins/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(os.path.dirname(__file__), 'static'),
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '1w4#0m^073ybn=0dvk&1pymwwb1)f2ww2fm+n5@45f@-kvk@co'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'github_jenkins.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'github_jenkins.wsgi.application'
TEMPLATE_DIRS = (
os.path.join(os.path.dirname(__file__), 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'django.contrib.admindocs',
'social_auth',
'github_jenkins.app',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'formatters': {
        'standard': {
            # dictConfig expects the key 'format'; the previous 'fmt' key was
            # silently ignored, so the console handler fell back to the
            # default (message-only) format.
            'format': '%(asctime)s %(levelname)-8.8s [%(name)s:%(lineno)s] %(message)s',
        },
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler',
        },
        'console': {
            'level': 'DEBUG',
            'formatter': 'standard',
            'class': 'logging.StreamHandler',
            'stream': 'ext://sys.stderr',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
        'github_jenkins.app.notifications': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'github_jenkins.app.views': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
        'github_jenkins.app.models': {
            'handlers': ['console'],
            'level': 'DEBUG',
        },
    }
}
AUTHENTICATION_BACKENDS = (
'social_auth.backends.contrib.github.GithubBackend',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'social_auth.context_processors.social_auth_by_type_backends',
)
# LOGIN_URL = '/github-jenkins/accounts/login'
LOGIN_REDIRECT_URL = '/github-jenkins'
LOGOUT_URL = '/github-jenkins/logout'
# Machine-specific overrides: local_settings.py must exist and supply the
# GitHub OAuth credentials (it is intentionally kept out of version control),
# so fail fast with an explanatory error if it is missing.
try:
    from github_jenkins.local_settings import *
except ImportError:
    raise Exception('local_settings.py must be created with authentication config')
| bsd-3-clause |
quantifiedcode-bot/invenio-search | invenio_search/enhancers/cache_results.py | 7 | 1701 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Query results cacher."""
from invenio_search.cache import get_results_cache, set_results_cache
class CacheOp(object):
    """Query operator that caches the results of the wrapped query."""

    def __init__(self, query, collection=None):
        """Wrap *query*; results are cached per *collection*."""
        self.query = query
        self.collection = collection

    def __repr__(self):
        """Object representation."""
        return '{0}({1!r})'.format(type(self).__name__, self.query)

    def accept(self, visitor):
        """Return cached results, evaluating the wrapped query on a cache miss."""
        cached = get_results_cache(str(self.query), self.collection)
        if cached is not None:
            return cached
        results = self.query.accept(visitor)
        set_results_cache(results, str(self.query), self.collection)
        return results
def apply(query, collection=None, **kwargs):
    """Decorate query with a cache operator.

    Extra keyword arguments are accepted for interface compatibility but
    are not used by the cache wrapper itself.
    """
    cached_query = CacheOp(query, collection=collection)
    return cached_query
| gpl-2.0 |
Epirex/android_external_chromium_org | tools/metrics/histograms/find_unmapped_histograms.py | 56 | 7483 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Scans the Chromium source for histograms that are absent from histograms.xml.
This is a heuristic scan, so a clean run of this script does not guarantee that
all histograms in the Chromium source are properly mapped. Notably, field
trials are entirely ignored by this script.
"""
import commands
import extract_histograms
import logging
import optparse
import os
import re
import sys
ADJACENT_C_STRING_REGEX = re.compile(r"""
(" # Opening quotation mark
[^"]*) # Literal string contents
" # Closing quotation mark
\s* # Any number of spaces
" # Another opening quotation mark
""", re.VERBOSE)
CONSTANT_REGEX = re.compile(r"""
(\w*::)? # Optional namespace
k[A-Z] # Match a constant identifier: 'k' followed by an uppercase letter
\w* # Match the rest of the constant identifier
$ # Make sure there's only the identifier, nothing else
""", re.VERBOSE)
HISTOGRAM_REGEX = re.compile(r"""
UMA_HISTOGRAM # Match the shared prefix for standard UMA histogram macros
\w* # Match the rest of the macro name, e.g. '_ENUMERATION'
\( # Match the opening parenthesis for the macro
\s* # Match any whitespace -- especially, any newlines
([^,]*) # Capture the first parameter to the macro
, # Match the comma that delineates the first parameter
""", re.VERBOSE)
class DirectoryNotFoundException(Exception):
    """Raised when the requested root directory cannot be located.

    A local subclass so callers can distinguish this condition from
    standard exceptions.
    """

    def __init__(self, msg):
        # Keep the message on an attribute for direct access by callers.
        self.msg = msg

    def __str__(self):
        return self.msg
def changeWorkingDirectory(target_directory):
    """Changes the working directory to the given |target_directory|, which
    defaults to the root of the Chromium checkout.

    Returns:
      None

    Raises:
      DirectoryNotFoundException if the target directory cannot be found.
    """
    current = os.getcwd()
    offset = current.find(target_directory)
    if offset == -1:
        raise DirectoryNotFoundException(
            'Could not find root directory "' + target_directory + '". '
            'Please run this script within your Chromium checkout.')
    # Truncate the path just past the target component and move there.
    os.chdir(current[:offset + len(target_directory)])
def collapseAdjacentCStrings(string):
    """Collapses any adjacent C strings into a single string.

    Useful to re-combine strings that were split across multiple lines to
    satisfy the 80-col restriction.

    Args:
      string: The string to recombine, e.g. '"Foo"\n  "bar"'

    Returns:
      The collapsed string, e.g. "Foobar" for an input of '"Foo"\n  "bar"'
    """
    # Collapse one seam at a time until the text stops changing.
    collapsed = ADJACENT_C_STRING_REGEX.sub(r'\1', string, count=1)
    while collapsed != string:
        string = collapsed
        collapsed = ADJACENT_C_STRING_REGEX.sub(r'\1', string, count=1)
    return collapsed
def logNonLiteralHistogram(filename, histogram):
    """Logs a warning about a non-literal histogram name found in the source.

    Filters out known acceptable exceptions before logging.

    Args:
      filename: The filename for the file containing the histogram, e.g.
                'chrome/browser/memory_details.cc'
      histogram: The expression that evaluates to the name of the histogram,
                 e.g. '"FakeHistogram" + variant'

    Returns:
      None
    """
    # Known-acceptable cases:
    #  * histogram macros, which typically contain backslashes so that they
    #    can be formatted across lines;
    #  * field trials, which are unique within a session and therefore
    #    effectively constants;
    #  * names that have been pulled out into C++ constants.
    is_macro = '\\' in histogram
    is_field_trial = histogram.startswith('base::FieldTrial::MakeName')
    is_constant = CONSTANT_REGEX.match(histogram) is not None
    if is_macro or is_field_trial or is_constant:
        return

    # TODO(isherman): This is still a little noisy... needs further filtering
    # to reduce the noise.
    logging.warning('%s contains non-literal histogram name <%s>', filename,
                    histogram)
def readChromiumHistograms():
    """Searches the Chromium source for all histogram names.

    Also prints warnings for any invocations of the UMA_HISTOGRAM_* macros
    with names that might vary during a single run of the app.

    Returns:
      A set containing any found literal histogram names.
    """
    logging.info('Scanning Chromium source for histograms...')

    # Use git grep to find all invocations of the UMA_HISTOGRAM_* macros.
    # Examples:
    #   'path/to/foo.cc:420: UMA_HISTOGRAM_COUNTS_100("FooGroup.FooName",'
    #   'path/to/bar.cc:632: UMA_HISTOGRAM_ENUMERATION('
    grep_output = commands.getoutput('git gs UMA_HISTOGRAM')
    filenames = set(line.split(':')[0] for line in grep_output.split('\n'))

    histograms = set()
    for filename in filenames:
        with open(filename, 'r') as source_file:
            source = source_file.read()

        for candidate in set(HISTOGRAM_REGEX.findall(source)):
            candidate = collapseAdjacentCStrings(candidate)

            # A usable name must begin and end with a quotation mark...
            if candidate[0] != '"' or candidate[-1] != '"':
                logNonLiteralHistogram(filename, candidate)
                continue

            # ...and contain no quotation marks anywhere else.
            stripped = candidate.strip('"')
            if '"' in stripped:
                logNonLiteralHistogram(filename, candidate)
                continue

            histograms.add(stripped)

    return histograms
def readXmlHistograms(histograms_file_location):
    """Parses all histogram names from histograms.xml.

    Returns:
      A set containing the parsed histogram names.
    """
    logging.info('Reading histograms from %s...' % histograms_file_location)
    parsed = extract_histograms.ExtractHistograms(histograms_file_location)
    return set(extract_histograms.ExtractNames(parsed))
def main():
    """Diffs histogram names in the source against histograms.xml and reports
    any that are unmapped."""
    # Parse command line options.
    parser = optparse.OptionParser()
    parser.add_option(
        '--root-directory', dest='root_directory', default='src',
        help='scan within DIRECTORY for histograms [optional, defaults to "src/"]',
        metavar='DIRECTORY')
    parser.add_option(
        '--histograms-file', dest='histograms_file_location',
        default='tools/metrics/histograms/histograms.xml',
        help='read histogram definitions from FILE (relative to --root-directory) '
             '[optional, defaults to "tools/histograms/histograms.xml"]',
        metavar='FILE')
    opts, extra_args = parser.parse_args()
    if extra_args:
        # Positional arguments are not supported.
        parser.print_help()
        sys.exit(1)

    logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)

    try:
        changeWorkingDirectory(opts.root_directory)
    except DirectoryNotFoundException as e:
        logging.error(e)
        sys.exit(1)

    # Names found in source code but absent from the XML definitions.
    unmapped_histograms = sorted(
        readChromiumHistograms() - readXmlHistograms(opts.histograms_file_location))

    if not unmapped_histograms:
        logging.info('Success! No unmapped histograms found.')
        return

    logging.info('')
    logging.info('')
    logging.info('Histograms in Chromium but not in %s:' %
                 opts.histograms_file_location)
    logging.info('-------------------------------------------------')
    for histogram in unmapped_histograms:
        logging.info('  %s', histogram)
# Allow the script to be run directly from the command line.
if __name__ == '__main__':
    main()
| bsd-3-clause |
JonatanAntoni/CMSIS_5 | CMSIS/DSP/Testing/DebugScripts/debug.py | 2 | 3123 | import numpy as np
from pylab import figure, clf, plot, xlabel, ylabel, xlim, ylim, title, grid, axes, show,semilogx, semilogy
import scipy.fftpack
import os.path
import struct
import argparse
import PatternGeneration.DebugTools as d
# Example script to read patterns and test outputs.

parser = argparse.ArgumentParser(description='Debug description')
parser.add_argument('-f', nargs='?',type = str, default="f32", help="Format")
parser.add_argument('-n', nargs='?',type = str, default="1", help="Test number")
parser.add_argument('-i', nargs='?',type = bool, default=False, help="Ifft")
parser.add_argument('-ui', nargs='?',const=True,type = bool, default=False, help="Display curves")

args = parser.parse_args()

# Complex FFT lengths covered by the tests; test number n uses FFTSIZES[n-1].
FFTSIZES=[16,32,64,128,256,512,1024,2048,4096]

# Test numbers 19 and up are the inverse-FFT variants.
if int(args.n) >= 19:
    args.i = True

if args.i:
    # IFFT test: map the test number back onto the forward-test range.
    n = int(args.n) - 18
    s = FFTSIZES[n-1]
    # Scaling exponent used below to rescale the fixed-point reference;
    # presumably matches the scaling of the IFFT under test -- TODO confirm
    # against the pattern generator.
    sc = n - 1 + 4
    inputPath = os.path.join("Patterns","DSP","Transform","Transform%s" % args.f.upper(),"ComplexInputIFFTSamples_Noisy_%d_%d_%s.txt" % (s,n,args.f))
    refPath = os.path.join("Patterns","DSP","Transform","Transform%s" % args.f.upper(),"ComplexInputSamples_Noisy_%d_%d_%s.txt" % (s,n,args.f))
    outputPath= os.path.join("Output","DSP","Transform","Transform%s" % args.f.upper(),"ComplexFFTSamples_%s.txt" % args.n)
else:
    s = FFTSIZES[int(args.n)-1]
    inputPath = os.path.join("Patterns","DSP","Transform","Transform%s" % args.f.upper(),"ComplexInputSamples_Noisy_%d_%s_%s.txt" % (s,args.n,args.f))
    refPath = os.path.join("Patterns","DSP","Transform","Transform%s" % args.f.upper(),"ComplexFFTSamples_Noisy_%d_%s_%s.txt" % (s,args.n,args.f))
    outputPath= os.path.join("Output","DSP","Transform","Transform%s" % args.f.upper(),"ComplexFFTSamples_%s.txt" % args.n)

print(inputPath)

# Load the input pattern, the reference, and the computed output for the
# selected format, and reinterpret the sample arrays as complex values.
# NOTE(review): the complex128 view assumes the readers return float64
# arrays of interleaved real/imag pairs -- confirm in DebugTools.
if args.f == "f32":
    inSig = d.readF32Pattern(inputPath)
    inSig=inSig.view(dtype=np.complex128)
    refSig = d.readF32Pattern(refPath)
    refSig=refSig.view(dtype=np.complex128)
    sig = d.readF32Output(outputPath)
    sig=sig.view(dtype=np.complex128)

if args.f == "q31":
    inSig = d.readQ31Pattern(inputPath)
    inSig=inSig.view(dtype=np.complex128)
    refSig = d.readQ31Pattern(refPath)
    refSig=refSig.view(dtype=np.complex128)
    sig = d.readQ31Output(outputPath)
    sig=sig.view(dtype=np.complex128)

if args.f == "q15":
    inSig = d.readQ15Pattern(inputPath)
    inSig=inSig.view(dtype=np.complex128)
    refSig = d.readQ15Pattern(refPath)
    refSig=refSig.view(dtype=np.complex128)
    sig = d.readQ15Output(outputPath)
    sig=sig.view(dtype=np.complex128)

# For fixed-point IFFT tests, rescale the reference by 2**sc before comparing.
if args.i and args.f != "f32":
    refSig = refSig / 2**sc

# Optionally plot input, reference and computed signals for visual comparison.
if args.ui:
    if args.i:
        figure()
        plot(abs(inSig))
        figure()
        plot(np.real(refSig))
        figure()
        plot(np.real(sig))
    else:
        figure()
        plot(np.real(inSig))
        figure()
        plot(abs(refSig))
        figure()
        plot(abs(sig))

# Print the signal-to-noise ratio between reference and computed output.
print(d.SNR(refSig,sig))

#figure()
#plot(np.unwrap(np.angle(refSig)))
#figure()
#plot(np.unwrap(np.angle(sig)))
#figure()
#plot(np.unwrap(np.angle(sig)) - np.unwrap(np.angle(refSig)))

show()
bitjammer/swift | utils/android/adb/commands.py | 32 | 6053 | # adb/commands.py - Run executables on an Android device -*- python -*-
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Push executables to an Android device and run them, capturing their output
# and exit code.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import subprocess
import tempfile
import uuid
# A temporary directory on the Android device.
DEVICE_TEMP_DIR = '/data/local/tmp'
def shell(args):
    """
    Execute 'adb shell' with the given arguments.

    Raise an exception if 'adb shell' returns a non-zero exit code.
    Note that this only occurs if communication with the connected device
    fails, not if the command run on the device fails.
    """
    command = ['adb', 'shell'] + args
    return subprocess.check_output(command)
def rmdir(path):
    """Remove all files in the device directory at `path`."""
    # The wildcard is expanded by the shell running on the device.
    shell(['rm', '-rf', '%s/*' % path])
def push(local_path, device_path):
    """Move the file at the given local path to the path on the device."""
    command = ['adb', 'push', local_path, device_path]
    output = subprocess.check_output(command, stderr=subprocess.STDOUT)
    return output.strip()
def reboot():
    """Reboot the connected Android device, waiting for it to return online."""
    for command in (['adb', 'reboot'], ['adb', 'wait-for-device']):
        subprocess.check_call(command)
def _create_executable_on_device(device_path, contents):
    """Write `contents` to `device_path` on the device and mark it executable.

    The contents are staged in a local temporary file, pushed to the device,
    and then the local copy is removed.
    """
    # mkstemp returns an open OS-level file descriptor; the original code
    # leaked both the descriptor and the temporary file. Wrap the fd so it is
    # closed after writing, and always delete the local temp file.
    fd, tmp = tempfile.mkstemp()
    try:
        with os.fdopen(fd, 'w') as f:
            f.write(contents)
        push(tmp, device_path)
    finally:
        os.remove(tmp)
    shell(['chmod', '755', device_path])
def execute_on_device(executable_path, executable_arguments):
    """
    Run an executable on an Android device.

    Push an executable at the given 'executable_path' to an Android device,
    then execute that executable on the device, passing any additional
    'executable_arguments'. Return 0 if the executable succeeded when run on
    device, and 1 otherwise.

    This function is not as simple as calling 'adb shell', for two reasons:

    1. 'adb shell' can only take input up to a certain length, so it fails for
       long executable names or when a large amount of arguments are passed to
       the executable. This function attempts to limit the size of any string
       passed to 'adb shell'.
    2. 'adb shell' ignores the exit code of any command it runs. This function
       therefore uses its own mechanisms to determine whether the executable
       had a successful exit code when run on device.
    """
    # We'll be running the executable in a temporary directory in
    # /data/local/tmp. `adb shell` has trouble with commands that
    # exceed a certain length, so to err on the safe side we only
    # use the first 10 characters of the UUID.
    uuid_dir = '{}/{}'.format(DEVICE_TEMP_DIR, str(uuid.uuid4())[:10])
    shell(['mkdir', '-p', uuid_dir])

    # `adb` can only handle commands under a certain length. No matter what the
    # original executable's name, on device we call it `__executable`.
    executable = '{}/__executable'.format(uuid_dir)
    push(executable_path, executable)

    # When running the executable on the device, we need to pass it the same
    # arguments, as well as specify the correct LD_LIBRARY_PATH. Save these
    # to a file we can easily call multiple times.
    executable_with_args = '{}/__executable_with_args'.format(uuid_dir)
    _create_executable_on_device(
        executable_with_args,
        'LD_LIBRARY_PATH={uuid_dir}:{tmp_dir} '
        '{executable} {executable_arguments}'.format(
            uuid_dir=uuid_dir,
            tmp_dir=DEVICE_TEMP_DIR,
            executable=executable,
            executable_arguments=' '.join(executable_arguments)))

    # Write the output from the test executable to a file named '__stdout', and
    # if the test executable succeeds, write 'SUCCEEDED' to a file
    # named '__succeeded'. We do this because `adb shell` does not report
    # the exit code of the command it executes on the device, so instead we
    # check the '__succeeded' file for our string.
    executable_stdout = '{}/__stdout'.format(uuid_dir)
    succeeded_token = 'SUCCEEDED'
    executable_succeeded = '{}/__succeeded'.format(uuid_dir)
    executable_piped = '{}/__executable_piped'.format(uuid_dir)
    _create_executable_on_device(
        executable_piped,
        '{executable_with_args} > {executable_stdout} && '
        'echo "{succeeded_token}" > {executable_succeeded}'.format(
            executable_with_args=executable_with_args,
            executable_stdout=executable_stdout,
            succeeded_token=succeeded_token,
            executable_succeeded=executable_succeeded))

    # We've pushed everything we need to the device.
    # Now execute the wrapper script.
    shell([executable_piped])

    # Grab the results of running the executable on device.
    stdout = shell(['cat', executable_stdout])
    # Note: despite the name, this holds the success-token text written by the
    # wrapper script, not a numeric exit code.
    exitcode = shell(['cat', executable_succeeded])
    if not exitcode.startswith(succeeded_token):
        debug_command = '$ adb shell {}'.format(executable_with_args)
        print('Executable exited with a non-zero code on the Android device.\n'
              'Device stdout:\n'
              '{stdout}\n'
              'To debug, run:\n'
              '{debug_command}\n'.format(
                  stdout=stdout,
                  debug_command=debug_command))
        # Exit early so that the output isn't passed to FileCheck, nor are any
        # temporary directories removed; this allows the user to re-run
        # the executable on the device.
        return 1

    print(stdout)
    shell(['rm', '-rf', uuid_dir])
    return 0
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.