repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
vukae/eve-wspace | evewspace/Map/tests.py | 138 | 1244 | # Eve W-Space
# Copyright (C) 2013 Andrew Austin and other contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version. An additional term under section
# 7 of the GPL is included in the LICENSE file.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder sanity test from the Django app template."""

    def test_basic_addition(self):
        """Sanity check: integer addition behaves as expected."""
        expected = 2
        self.assertEqual(expected, 1 + 1)
| gpl-3.0 |
lumig242/Hue-Integration-with-CDAP | desktop/core/ext-py/boto-2.38.0/boto/mashups/order.py | 153 | 7584 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
High-level abstraction of an EC2 order for servers
"""
import boto
import boto.ec2
from boto.mashups.server import Server, ServerSet
from boto.mashups.iobject import IObject
from boto.pyami.config import Config
from boto.sdb.persist import get_domain, set_domain
import time
from boto.compat import StringIO
# EC2 instance types offered for interactive selection in Item.set_instance_type.
InstanceTypes = ['m1.small', 'm1.large', 'm1.xlarge', 'c1.medium', 'c1.xlarge']
class Item(IObject):
    """One line item of an EC2 server Order.

    Gathers everything needed to launch a batch of identical instances:
    region, name, instance type, quantity, availability zone, AMI,
    security groups, keypair and a Pyami boot config.  Any attribute not
    supplied to enter() is prompted for interactively via the IObject
    helpers (choose_from_list, get_string, get_int, get_filename).
    """

    def __init__(self):
        self.region = None
        self.name = None
        self.instance_type = None
        self.quantity = 0
        self.zone = None
        self.ami = None
        self.groups = []
        self.key = None
        self.ec2 = None      # EC2 connection, created in enter()
        self.config = None   # Pyami Config serialized into EC2 user-data
        # Bug fix: userdata was never initialized, so set_userdata() and
        # get_userdata() raised AttributeError on a fresh Item.
        self.userdata = {}

    def set_userdata(self, key, value):
        """Store a single user-data key/value pair."""
        self.userdata[key] = value

    def get_userdata(self, key):
        """Return a previously stored user-data value (KeyError if absent)."""
        return self.userdata[key]

    def set_region(self, region=None):
        """Set the EC2 region, prompting from the available regions if None."""
        if region:
            self.region = region
        else:
            l = [(r, r.name, r.endpoint) for r in boto.ec2.regions()]
            self.region = self.choose_from_list(l, prompt='Choose Region')

    def set_name(self, name=None):
        """Set the item name, prompting if not given."""
        if name:
            self.name = name
        else:
            self.name = self.get_string('Name')

    def set_instance_type(self, instance_type=None):
        """Set the instance type, prompting from InstanceTypes if None."""
        if instance_type:
            self.instance_type = instance_type
        else:
            self.instance_type = self.choose_from_list(InstanceTypes, 'Instance Type')

    def set_quantity(self, n=0):
        """Set how many instances to launch, prompting if n is not positive."""
        if n > 0:
            self.quantity = n
        else:
            self.quantity = self.get_int('Quantity')

    def set_zone(self, zone=None):
        """Set the availability zone, prompting from the region's zones if None."""
        if zone:
            self.zone = zone
        else:
            l = [(z, z.name, z.state) for z in self.ec2.get_all_zones()]
            self.zone = self.choose_from_list(l, prompt='Choose Availability Zone')

    def set_ami(self, ami=None):
        """Set the AMI, prompting from all images visible to the account if None."""
        if ami:
            self.ami = ami
        else:
            l = [(a, a.id, a.location) for a in self.ec2.get_all_images()]
            self.ami = self.choose_from_list(l, prompt='Choose AMI')

    def add_group(self, group=None):
        """Append a security group, prompting from existing groups if None."""
        if group:
            self.groups.append(group)
        else:
            l = [(s, s.name, s.description) for s in self.ec2.get_all_security_groups()]
            self.groups.append(self.choose_from_list(l, prompt='Choose Security Group'))

    def set_key(self, key=None):
        """Set the SSH keypair, prompting from existing keypairs if None."""
        if key:
            self.key = key
        else:
            l = [(k, k.name, '') for k in self.ec2.get_all_key_pairs()]
            self.key = self.choose_from_list(l, prompt='Choose Keypair')

    def update_config(self):
        """Inject AWS credentials and SDB bookkeeping into the Pyami config."""
        if not self.config.has_section('Credentials'):
            self.config.add_section('Credentials')
            self.config.set('Credentials', 'aws_access_key_id', self.ec2.aws_access_key_id)
            self.config.set('Credentials', 'aws_secret_access_key', self.ec2.aws_secret_access_key)
        if not self.config.has_section('Pyami'):
            self.config.add_section('Pyami')
        sdb_domain = get_domain()
        if sdb_domain:
            self.config.set('Pyami', 'server_sdb_domain', sdb_domain)
            self.config.set('Pyami', 'server_sdb_name', self.name)

    def set_config(self, config_path=None):
        """Load the Pyami Config from config_path, prompting for a path if None."""
        if not config_path:
            config_path = self.get_filename('Specify Config file')
        self.config = Config(path=config_path)

    def get_userdata_string(self):
        """Serialize the Pyami config to the string passed as EC2 user-data."""
        s = StringIO()
        self.config.write(s)
        return s.getvalue()

    def enter(self, **params):
        """Populate every field, prompting interactively for anything missing.

        Recognized keys: region, name, instance_type, zone, quantity,
        ami, groups, key, config.  Also opens the EC2 connection and
        finalizes the config via update_config().
        """
        self.region = params.get('region', self.region)
        if not self.region:
            self.set_region()
        self.ec2 = self.region.connect()
        self.name = params.get('name', self.name)
        if not self.name:
            self.set_name()
        self.instance_type = params.get('instance_type', self.instance_type)
        if not self.instance_type:
            self.set_instance_type()
        self.zone = params.get('zone', self.zone)
        if not self.zone:
            self.set_zone()
        self.quantity = params.get('quantity', self.quantity)
        if not self.quantity:
            self.set_quantity()
        self.ami = params.get('ami', self.ami)
        if not self.ami:
            self.set_ami()
        self.groups = params.get('groups', self.groups)
        if not self.groups:
            self.add_group()
        self.key = params.get('key', self.key)
        if not self.key:
            self.set_key()
        self.config = params.get('config', self.config)
        if not self.config:
            self.set_config()
        self.update_config()
class Order(IObject):
    """A collection of Items plus the logic to launch them on EC2."""

    def __init__(self):
        # Items to launch; reservation is declared but not assigned
        # anywhere in this class body.
        self.items = []
        self.reservation = None

    def add_item(self, **params):
        """Create an Item, populate it (prompting as needed), and add it."""
        item = Item()
        item.enter(**params)
        self.items.append(item)

    def display(self):
        """Print a tab-separated summary of the order to stdout."""
        print('This Order consists of the following items')
        print()
        # NOTE(review): '\nAMI' splits the header onto two lines while each
        # data row below is a single line -- looks like it was meant to be
        # '\tAMI'.  Confirm before changing the output format.
        print('QTY\tNAME\tTYPE\nAMI\t\tGroups\t\t\tKeyPair')
        for item in self.items:
            print('%s\t%s\t%s\t%s\t%s\t%s' % (item.quantity, item.name, item.instance_type,
                                              item.ami.id, item.groups, item.key.name))

    def place(self, block=True):
        """Launch every item's AMI and persist a Server record per instance.

        Prompts for an SDB domain if none is configured.  Returns the
        single Server when exactly one instance was launched, otherwise
        the whole ServerSet.
        """
        if get_domain() is None:
            print('SDB Persistence Domain not set')
            domain_name = self.get_string('Specify SDB Domain')
            set_domain(domain_name)
        s = ServerSet()
        for item in self.items:
            r = item.ami.run(min_count=1, max_count=item.quantity,
                             key_name=item.key.name, user_data=item.get_userdata_string(),
                             security_groups=item.groups, instance_type=item.instance_type,
                             placement=item.zone.name)
            if block:
                states = [i.state for i in r.instances]
                if states.count('running') != len(states):
                    print(states)
                    # NOTE(review): this polls exactly once (an 'if', not a
                    # 'while'), so instances may still be pending after the
                    # sleep -- confirm whether a retry loop was intended.
                    time.sleep(15)
                    states = [i.update() for i in r.instances]
            for i in r.instances:
                server = Server()
                server.name = item.name
                server.instance_id = i.id
                server.reservation = r
                server.save()
                s.append(server)
        if len(s) == 1:
            return s[0]
        else:
            return s
| apache-2.0 |
giorgiop/scipy | scipy/linalg/decomp.py | 16 | 31707 | #
# Author: Pearu Peterson, March 2002
#
# additions by Travis Oliphant, March 2002
# additions by Eric Jones, June 2002
# additions by Johannes Loehnert, June 2006
# additions by Bart Vandereycken, June 2006
# additions by Andrew D Straw, May 2007
# additions by Tiziano Zito, November 2008
#
# April 2010: Functions for LU, QR, SVD, Schur and Cholesky decompositions were
# moved to their own files. Still in this file are functions for eigenstuff
# and for the Hessenberg form.
from __future__ import division, print_function, absolute_import
__all__ = ['eig', 'eigh', 'eig_banded', 'eigvals', 'eigvalsh',
'eigvals_banded', 'hessenberg']
import numpy
from numpy import (array, isfinite, inexact, nonzero, iscomplexobj, cast,
flatnonzero, conj)
# Local imports
from scipy._lib.six import xrange
from scipy._lib._util import _asarray_validated
from .misc import LinAlgError, _datacopied, norm
from .lapack import get_lapack_funcs
# Single-precision imaginary unit, used to assemble complex eigenvalues from
# the separate real/imaginary parts returned by the real LAPACK drivers.
_I = cast['F'](1j)
def _make_complex_eigvecs(w, vin, dtype):
"""
Produce complex-valued eigenvectors from LAPACK DGGEV real-valued output
"""
# - see LAPACK man page DGGEV at ALPHAI
v = numpy.array(vin, dtype=dtype)
m = (w.imag > 0)
m[:-1] |= (w.imag[1:] < 0) # workaround for LAPACK bug, cf. ticket #709
for i in flatnonzero(m):
v.imag[:, i] = vin[:, i+1]
conj(v[:, i], v[:, i+1])
return v
def _geneig(a1, b1, left, right, overwrite_a, overwrite_b):
    """Solve the generalized eigenvalue problem a1 v = w b1 v via LAPACK ggev.

    Parameters mirror eig(): a1 and b1 are validated square arrays,
    left/right select which eigenvector sets to compute, and the
    overwrite_* flags allow LAPACK to clobber the inputs.  Returns w,
    optionally followed by vl and/or vr.

    Raises
    ------
    ValueError
        If LAPACK reports an illegal argument (info < 0).
    LinAlgError
        If the generalized eigenvalue iteration fails to converge.
    """
    ggev, = get_lapack_funcs(('ggev',), (a1, b1))
    cvl, cvr = left, right
    # Workspace query: lwork=-1 makes ggev report the optimal workspace
    # size instead of computing anything.
    res = ggev(a1, b1, lwork=-1)
    # Bug fix: numpy.int was a deprecated alias for the builtin int and was
    # removed in NumPy 1.24; astype(int) is exactly equivalent.
    lwork = res[-2][0].real.astype(int)
    if ggev.typecode in 'cz':
        # Complex drivers return the eigenvalues directly as alpha/beta.
        alpha, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr, lwork,
                                               overwrite_a, overwrite_b)
        w = alpha / beta
    else:
        # Real drivers split the eigenvalues into real/imaginary parts.
        alphar, alphai, beta, vl, vr, work, info = ggev(a1, b1, cvl, cvr,
                                                        lwork, overwrite_a,
                                                        overwrite_b)
        w = (alphar + _I * alphai) / beta
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal ggev' %
                         -info)
    if info > 0:
        raise LinAlgError("generalized eig algorithm did not converge "
                          "(info=%d)" % info)
    # Real drivers pack conjugate eigenvector pairs into adjacent real
    # columns; unpack them into complex columns when any eigenvalue has a
    # nonzero imaginary part.
    only_real = numpy.logical_and.reduce(numpy.equal(w.imag, 0.0))
    if not (ggev.typecode in 'cz' or only_real):
        t = w.dtype.char
        if left:
            vl = _make_complex_eigvecs(w, vl, t)
        if right:
            vr = _make_complex_eigvecs(w, vr, t)
    # The eigenvectors returned by the lapack function are NOT normalized.
    for i in xrange(vr.shape[0]):
        if right:
            vr[:, i] /= norm(vr[:, i])
        if left:
            vl[:, i] /= norm(vl[:, i])
    if not (left or right):
        return w
    if left:
        if right:
            return w, vl, vr
        return w, vl
    return w, vr
def eig(a, b=None, left=False, right=True, overwrite_a=False,
        overwrite_b=False, check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem of a square matrix.

    Find eigenvalues w and right or left eigenvectors of a general matrix::

        a   vr[:,i] = w[i]        b   vr[:,i]
        a.H vl[:,i] = w[i].conj() b.H vl[:,i]

    where ``.H`` is the Hermitian conjugation.

    Parameters
    ----------
    a : (M, M) array_like
        A complex or real matrix whose eigenvalues and eigenvectors
        will be computed.
    b : (M, M) array_like, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        Default is None, identity matrix is assumed.
    left : bool, optional
        Whether to calculate and return left eigenvectors.  Default is False.
    right : bool, optional
        Whether to calculate and return right eigenvectors.  Default is True.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.  Default is False.
    overwrite_b : bool, optional
        Whether to overwrite `b`; may improve performance.  Default is False.
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) double or complex ndarray
        The eigenvalues, each repeated according to its multiplicity.
    vl : (M, M) double or complex ndarray
        The normalized left eigenvector corresponding to the eigenvalue
        ``w[i]`` is the column vl[:,i]. Only returned if ``left=True``.
    vr : (M, M) double or complex ndarray
        The normalized right eigenvector corresponding to the eigenvalue
        ``w[i]`` is the column ``vr[:,i]``.  Only returned if ``right=True``.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge.

    See Also
    --------
    eigh : Eigenvalues and right eigenvectors for symmetric/Hermitian arrays.
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    # If validation copied the input, LAPACK may safely clobber the copy.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    if b is not None:
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square matrix')
        if b1.shape != a1.shape:
            raise ValueError('a and b must have the same shape')
        # Generalized problem: delegate to the ggev-based helper.
        return _geneig(a1, b1, left, right, overwrite_a, overwrite_b)
    geev, geev_lwork = get_lapack_funcs(('geev', 'geev_lwork'), (a1,))
    compute_vl, compute_vr = left, right
    # Workspace query for the optimal lwork before the real call.
    lwork, info = geev_lwork(a1.shape[0],
                             compute_vl=compute_vl,
                             compute_vr=compute_vr)
    if info != 0:
        raise LinAlgError("internal *geev work array calculation failed: %d" %
                          (info,))
    lwork = int(lwork.real)
    if geev.typecode in 'cz':
        # Complex drivers return the complex eigenvalues directly.
        w, vl, vr, info = geev(a1, lwork=lwork,
                               compute_vl=compute_vl,
                               compute_vr=compute_vr,
                               overwrite_a=overwrite_a)
    else:
        # Real drivers return separate real and imaginary parts.
        wr, wi, vl, vr, info = geev(a1, lwork=lwork,
                                    compute_vl=compute_vl,
                                    compute_vr=compute_vr,
                                    overwrite_a=overwrite_a)
        # NOTE(review): 't' is unconditionally recomputed below before any
        # use, so this assignment appears redundant -- confirm before
        # removing.
        t = {'f': 'F', 'd': 'D'}[wr.dtype.char]
        w = wr + _I * wi
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal geev' %
                         -info)
    if info > 0:
        raise LinAlgError("eig algorithm did not converge (only eigenvalues "
                          "with order >= %d have converged)" % info)
    # Real drivers pack conjugate eigenvector pairs into adjacent real
    # columns; unpack into complex columns when any eigenvalue is complex.
    only_real = numpy.logical_and.reduce(numpy.equal(w.imag, 0.0))
    if not (geev.typecode in 'cz' or only_real):
        t = w.dtype.char
        if left:
            vl = _make_complex_eigvecs(w, vl, t)
        if right:
            vr = _make_complex_eigvecs(w, vr, t)
    # Return whichever combination of (w, vl, vr) the caller asked for.
    if not (left or right):
        return w
    if left:
        if right:
            return w, vl, vr
        return w, vl
    return w, vr
def eigh(a, b=None, lower=True, eigvals_only=False, overwrite_a=False,
         overwrite_b=False, turbo=True, eigvals=None, type=1,
         check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Find eigenvalues w and optionally eigenvectors v of matrix `a`, where
    `b` is positive definite::

        a v[:,i] = w[i] b v[:,i]
        v[i,:].conj() a v[:,i] = w[i]
        v[i,:].conj() b v[:,i] = 1

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric definite positive matrix in.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    eigvals_only : bool, optional
        Whether to calculate only eigenvalues and no eigenvectors.
        (Default: both are calculated)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo <= hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    type : int, optional
        Specifies the problem type to be solved:
           type = 1: a   v[:,i] = w[i] b v[:,i]
           type = 2: a b v[:,i] = w[i]   v[:,i]
           type = 3: b a v[:,i] = w[i]   v[:,i]
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.
    v : (M, N) complex ndarray
        (if eigvals_only == False)
        The normalized selected eigenvector corresponding to the
        eigenvalue w[i] is the column v[:,i].
        Normalization:
            type 1 and 3: v.conj() a      v  = w
            type 2: inv(v).conj() a  inv(v) = w
            type = 1 or 2: v.conj() b      v  = I
            type = 3: v.conj() inv(b) v  = I

    Raises
    ------
    LinAlgError :
        If eigenvalue computation does not converge,
        an error occurred, or b matrix is not definite positive. Note that
        if input matrices are not symmetric or hermitian, no error is reported
        but results will be wrong.

    See Also
    --------
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or a1.shape[0] != a1.shape[1]:
        raise ValueError('expected square matrix')
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    # Complex input selects the Hermitian ('he*') LAPACK family below;
    # real input selects the symmetric ('sy*') family.
    cplx = iscomplexobj(a1)
    if b is not None:
        b1 = _asarray_validated(b, check_finite=check_finite)
        overwrite_b = overwrite_b or _datacopied(b1, b)
        if len(b1.shape) != 2 or b1.shape[0] != b1.shape[1]:
            raise ValueError('expected square matrix')
        if b1.shape != a1.shape:
            raise ValueError("wrong b dimensions %s, should "
                             "be %s" % (str(b1.shape), str(a1.shape)))
        cplx = cplx or iscomplexobj(b1)
    else:
        b1 = None
    # Set job for fortran routines: 'V' computes eigenvectors, 'N' only
    # eigenvalues.
    _job = (eigvals_only and 'N') or 'V'
    # Port eigenvalue range from python (0-based) to fortran (1-based)
    # convention.
    if eigvals is not None:
        lo, hi = eigvals
        if lo < 0 or hi >= a1.shape[0]:
            raise ValueError('The eigenvalue range specified is not valid.\n'
                             'Valid range is [%s,%s]' % (0, a1.shape[0]-1))
        lo += 1
        hi += 1
        eigvals = (lo, hi)
    # set lower
    if lower:
        uplo = 'L'
    else:
        uplo = 'U'
    # fix prefix for lapack routines
    if cplx:
        pfx = 'he'
    else:
        pfx = 'sy'
    # Standard Eigenvalue Problem
    # Use '*evr' routines
    # FIXME: implement calculation of optimal lwork
    #        for all lapack routines
    if b1 is None:
        (evr,) = get_lapack_funcs((pfx+'evr',), (a1,))
        if eigvals is None:
            w, v, info = evr(a1, uplo=uplo, jobz=_job, range="A", il=1,
                             iu=a1.shape[0], overwrite_a=overwrite_a)
        else:
            (lo, hi) = eigvals
            w_tot, v, info = evr(a1, uplo=uplo, jobz=_job, range="I",
                                 il=lo, iu=hi, overwrite_a=overwrite_a)
            w = w_tot[0:hi-lo+1]
    # Generalized Eigenvalue Problem
    else:
        # Use '*gvx' routines if range is specified
        if eigvals is not None:
            (gvx,) = get_lapack_funcs((pfx+'gvx',), (a1, b1))
            (lo, hi) = eigvals
            w_tot, v, ifail, info = gvx(a1, b1, uplo=uplo, iu=hi,
                                        itype=type, jobz=_job, il=lo,
                                        overwrite_a=overwrite_a,
                                        overwrite_b=overwrite_b)
            w = w_tot[0:hi-lo+1]
        # Use '*gvd' routine if turbo is on and no eigvals are specified
        elif turbo:
            (gvd,) = get_lapack_funcs((pfx+'gvd',), (a1, b1))
            v, w, info = gvd(a1, b1, uplo=uplo, itype=type, jobz=_job,
                             overwrite_a=overwrite_a,
                             overwrite_b=overwrite_b)
        # Use '*gv' routine if turbo is off and no eigvals are specified
        else:
            (gv,) = get_lapack_funcs((pfx+'gv',), (a1, b1))
            v, w, info = gv(a1, b1, uplo=uplo, itype=type, jobz=_job,
                            overwrite_a=overwrite_a,
                            overwrite_b=overwrite_b)

    # Check if we had a successful exit
    if info == 0:
        if eigvals_only:
            return w
        else:
            return w, v
    elif info < 0:
        raise LinAlgError("illegal value in %i-th argument of internal"
                          " fortran routine." % (-info))
    elif info > 0 and b1 is None:
        raise LinAlgError("unrecoverable internal error.")
    # The algorithm failed to converge.
    elif info > 0 and info <= b1.shape[0]:
        if eigvals is not None:
            # Bug fix: '%' binds tighter than '-', so the original
            # expression computed (str % tuple) - 1 and raised TypeError
            # instead of reporting the failed indices.  Convert ifail's
            # 1-based LAPACK indices back to 0-based before formatting.
            raise LinAlgError("the eigenvectors %s failed to"
                              " converge." % (nonzero(ifail)[0] - 1))
        else:
            raise LinAlgError("internal fortran routine failed to converge: "
                              "%i off-diagonal elements of an "
                              "intermediate tridiagonal form did not converge"
                              " to zero." % info)
    # This occurs when b is not positive definite
    else:
        raise LinAlgError("the leading minor of order %i"
                          " of 'b' is not positive definite. The"
                          " factorization of 'b' could not be completed"
                          " and no eigenvalues or eigenvectors were"
                          " computed." % (info-b1.shape[0]))
def eig_banded(a_band, lower=False, eigvals_only=False, overwrite_a_band=False,
               select='a', select_range=None, max_ev=0, check_finite=True):
    """
    Solve real symmetric or complex hermitian band matrix eigenvalue problem.

    Find eigenvalues w and optionally right eigenvectors v of a::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:

        a_band[u + i - j, j] == a[i,j]  (if upper form; i <= j)
        a_band[    i - j, j] == a[i,j]  (if lower form; i >= j)

    where u is the number of bands above the diagonal.

    Example of a_band (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    eigvals_only : bool, optional
        Compute only the eigenvalues and no eigenvectors.
        (Default: calculate also eigenvectors)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    max_ev : int, optional
        For select=='v', maximum number of eigenvalues expected.
        For other values of select, has no meaning.
        In doubt, leave this parameter untouched.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.
    v : (M, M) float or complex ndarray
        The normalized eigenvector corresponding to the eigenvalue w[i] is
        the column v[:,i].

    Raises LinAlgError if eigenvalue computation does not converge
    """
    if eigvals_only or overwrite_a_band:
        a1 = _asarray_validated(a_band, check_finite=check_finite)
        overwrite_a_band = overwrite_a_band or (_datacopied(a1, a_band))
    else:
        # Eigenvectors requested without permission to overwrite: work on a
        # private copy so the caller's band matrix stays intact, and do the
        # finiteness check manually since _asarray_validated is bypassed.
        a1 = array(a_band)
        if issubclass(a1.dtype.type, inexact) and not isfinite(a1).all():
            raise ValueError("array must not contain infs or NaNs")
        overwrite_a_band = 1
    if len(a1.shape) != 2:
        raise ValueError('expected two-dimensional array')
    # NOTE(review): the integer entries in these membership lists look
    # unreachable -- select is documented as a string and select.lower()
    # would raise on an int before membership is tested.  Confirm before
    # cleaning up.
    if select.lower() not in [0, 1, 2, 'a', 'v', 'i', 'all', 'value', 'index']:
        raise ValueError('invalid argument for select')
    if select.lower() in [0, 'a', 'all']:
        # Full spectrum: use the *bevd (divide and conquer) drivers.
        if a1.dtype.char in 'GFD':
            bevd, = get_lapack_funcs(('hbevd',), (a1,))
            # FIXME: implement this somewhen, for now go with builtin values
            # FIXME: calc optimal lwork by calling ?hbevd(lwork=-1)
            #        or by using calc_lwork.f ???
            # lwork = calc_lwork.hbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'hbevd'
        else:  # a1.dtype.char in 'fd':
            bevd, = get_lapack_funcs(('sbevd',), (a1,))
            # FIXME: implement this somewhen, for now go with builtin values
            #        see above
            # lwork = calc_lwork.sbevd(bevd.typecode, a1.shape[0], lower)
            internal_name = 'sbevd'
        w, v, info = bevd(a1, compute_v=not eigvals_only,
                          lower=lower, overwrite_ab=overwrite_a_band)
    if select.lower() in [1, 2, 'i', 'v', 'index', 'value']:
        # Partial spectrum: use the *bevx expert drivers.  Note that
        # 'select' is re-bound here to the integer code LAPACK expects.
        if select.lower() in [2, 'i', 'index']:
            select = 2
            # Index-based selection: il/iu carry the range, vl/vu unused.
            vl, vu, il, iu = 0.0, 0.0, min(select_range), max(select_range)
            if min(il, iu) < 0 or max(il, iu) >= a1.shape[1]:
                raise ValueError('select_range out of bounds')
            max_ev = iu - il + 1
        else:  # 1, 'v', 'value'
            select = 1
            # Value-based selection: vl/vu carry the interval, il/iu unused.
            vl, vu, il, iu = min(select_range), max(select_range), 0, 0
            if max_ev == 0:
                max_ev = a_band.shape[1]
            if eigvals_only:
                max_ev = 1
        # calculate optimal abstol for dsbevx (see manpage)
        if a1.dtype.char in 'fF':  # single precision
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='f'),))
        else:
            lamch, = get_lapack_funcs(('lamch',), (array(0, dtype='d'),))
        abstol = 2 * lamch('s')
        if a1.dtype.char in 'GFD':
            bevx, = get_lapack_funcs(('hbevx',), (a1,))
            internal_name = 'hbevx'
        else:  # a1.dtype.char in 'gfd'
            bevx, = get_lapack_funcs(('sbevx',), (a1,))
            internal_name = 'sbevx'
        # il+1, iu+1: translate python indexing (0 ... N-1) into Fortran
        # indexing (1 ... N)
        w, v, m, ifail, info = bevx(a1, vl, vu, il+1, iu+1,
                                    compute_v=not eigvals_only,
                                    mmax=max_ev,
                                    range=select, lower=lower,
                                    overwrite_ab=overwrite_a_band,
                                    abstol=abstol)
        # crop off w and v: LAPACK returns m valid entries in oversized
        # output arrays.
        w = w[:m]
        if not eigvals_only:
            v = v[:, :m]
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal %s' %
                         (-info, internal_name))
    if info > 0:
        raise LinAlgError("eig algorithm did not converge")
    if eigvals_only:
        return w
    return w, v
def eigvals(a, b=None, overwrite_a=False, check_finite=True):
    """
    Compute eigenvalues from an ordinary or generalized eigenvalue problem.

    Find eigenvalues of a general matrix::

        a   vr[:,i] = w[i]        b   vr[:,i]

    Parameters
    ----------
    a : (M, M) array_like
        A complex or real matrix whose eigenvalues and eigenvectors
        will be computed.
    b : (M, M) array_like, optional
        Right-hand side matrix in a generalized eigenvalue problem.
        If omitted, identity matrix is assumed.
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) double or complex ndarray
        The eigenvalues, each repeated according to its multiplicity,
        but not in any specific order.

    Raises
    ------
    LinAlgError
        If eigenvalue computation does not converge

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays,
    eig : eigenvalues and right eigenvectors of general arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    """
    # Thin wrapper: request neither left nor right eigenvectors from eig().
    kwargs = dict(b=b, left=0, right=0,
                  overwrite_a=overwrite_a, check_finite=check_finite)
    return eig(a, **kwargs)
def eigvalsh(a, b=None, lower=True, overwrite_a=False,
             overwrite_b=False, turbo=True, eigvals=None, type=1,
             check_finite=True):
    """
    Solve an ordinary or generalized eigenvalue problem for a complex
    Hermitian or real symmetric matrix.

    Find eigenvalues w of matrix a, where b is positive definite::

        a v[:,i] = w[i] b v[:,i]
        v[i,:].conj() a v[:,i] = w[i]
        v[i,:].conj() b v[:,i] = 1

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues and
        eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric definite positive matrix in.
        If omitted, identity matrix is assumed.
    lower : bool, optional
        Whether the pertinent array data is taken from the lower or upper
        triangle of `a`. (Default: lower)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order) eigenvalues
        and corresponding eigenvectors to be returned: 0 <= lo < hi <= M-1.
        If omitted, all eigenvalues and eigenvectors are returned.
    type : int, optional
        Specifies the problem type to be solved:
           type = 1: a   v[:,i] = w[i] b v[:,i]
           type = 2: a b v[:,i] = w[i]   v[:,i]
           type = 3: b a v[:,i] = w[i]   v[:,i]
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.

    Raises
    ------
    LinAlgError :
        If eigenvalue computation does not converge,
        an error occurred, or b matrix is not definite positive. Note that
        if input matrices are not symmetric or hermitian, no error is reported
        but results will be wrong.

    See Also
    --------
    eigvals : eigenvalues of general arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    """
    # Thin wrapper: same problem as eigh(), eigenvalues only.
    kwargs = dict(b=b, lower=lower, eigvals_only=True,
                  overwrite_a=overwrite_a, overwrite_b=overwrite_b,
                  turbo=turbo, eigvals=eigvals, type=type,
                  check_finite=check_finite)
    return eigh(a, **kwargs)
def eigvals_banded(a_band, lower=False, overwrite_a_band=False,
                   select='a', select_range=None, check_finite=True):
    """
    Solve real symmetric or complex hermitian band matrix eigenvalue problem.

    Find eigenvalues w of a::

        a v[:,i] = w[i] v[:,i]
        v.H v    = identity

    The matrix a is stored in a_band either in lower diagonal or upper
    diagonal ordered form:

        a_band[u + i - j, j] == a[i,j]  (if upper form; i <= j)
        a_band[    i - j, j] == a[i,j]  (if lower form; i >= j)

    where u is the number of bands above the diagonal.

    Example of a_band (shape of a is (6,6), u=2)::

        upper form:
        *   *   a02 a13 a24 a35
        *   a01 a12 a23 a34 a45
        a00 a11 a22 a33 a44 a55

        lower form:
        a00 a11 a22 a33 a44 a55
        a10 a21 a32 a43 a54 *
        a20 a31 a42 a53 *   *

    Cells marked with * are not used.

    Parameters
    ----------
    a_band : (u+1, M) array_like
        The bands of the M by M matrix a.
    lower : bool, optional
        Is the matrix in the lower form. (Default is upper form)
    overwrite_a_band : bool, optional
        Discard data in a_band (may enhance performance)
    select : {'a', 'v', 'i'}, optional
        Which eigenvalues to calculate

        ======  ========================================
        select  calculated
        ======  ========================================
        'a'     All eigenvalues
        'v'     Eigenvalues in the interval (min, max]
        'i'     Eigenvalues with indices min <= i <= max
        ======  ========================================
    select_range : (min, max), optional
        Range of selected eigenvalues
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    w : (M,) ndarray
        The eigenvalues, in ascending order, each repeated according to its
        multiplicity.

    Raises LinAlgError if eigenvalue computation does not converge

    See Also
    --------
    eig_banded : eigenvalues and right eigenvectors for symmetric/Hermitian
        band matrices
    eigvals : eigenvalues of general arrays
    eigh : eigenvalues and right eigenvectors for symmetric/Hermitian arrays
    eig : eigenvalues and right eigenvectors for non-symmetric arrays
    """
    # Thin wrapper: same problem as eig_banded(), eigenvalues only.
    kwargs = dict(lower=lower, eigvals_only=1,
                  overwrite_a_band=overwrite_a_band, select=select,
                  select_range=select_range, check_finite=check_finite)
    return eig_banded(a_band, **kwargs)
# dtype characters considered double precision: int32 ('i'), int64 ('l'),
# float64 ('d').  NOTE(review): not referenced anywhere in this chunk --
# presumably consumed by code elsewhere in the module.
_double_precision = ['i', 'l', 'd']
def hessenberg(a, calc_q=False, overwrite_a=False, check_finite=True):
    """
    Compute Hessenberg form of a matrix.
    The Hessenberg decomposition is::
        A = Q H Q^H
    where `Q` is unitary/orthogonal and `H` has only zero elements below
    the first sub-diagonal.
    Parameters
    ----------
    a : (M, M) array_like
        Matrix to bring into Hessenberg form.
    calc_q : bool, optional
        Whether to compute the transformation matrix. Default is False.
    overwrite_a : bool, optional
        Whether to overwrite `a`; may improve performance.
        Default is False.
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
    Returns
    -------
    H : (M, M) ndarray
        Hessenberg form of `a`.
    Q : (M, M) ndarray
        Unitary/orthogonal similarity transformation matrix ``A = Q H Q^H``.
        Only returned if ``calc_q=True``.
    Raises
    ------
    ValueError
        If `a` is not a square 2-D matrix, or an internal LAPACK call
        reports an illegal argument.
    """
    a1 = _asarray_validated(a, check_finite=check_finite)
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    # Overwriting is also safe when _asarray_validated made a fresh copy.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    # if 2x2 or smaller: already in Hessenberg
    if a1.shape[0] <= 2:
        if calc_q:
            # Q is the identity: no reduction was performed.
            return a1, numpy.eye(a1.shape[0])
        return a1
    # LAPACK drivers: gebal balances the matrix, gehrd performs the
    # Hessenberg reduction, gehrd_lwork queries gehrd's workspace size.
    gehrd, gebal, gehrd_lwork = get_lapack_funcs(('gehrd', 'gebal',
                                                  'gehrd_lwork'), (a1,))
    # Balance (scaling only, permute=0) for numerical accuracy.
    ba, lo, hi, pivscale, info = gebal(a1, permute=0, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gebal '
                         '(hessenberg)' % -info)
    n = len(a1)
    # Workspace size query for gehrd.
    lwork, info = gehrd_lwork(ba.shape[0], lo=lo, hi=hi)
    if info != 0:
        raise ValueError('failed to compute internal gehrd work array size. '
                         'LAPACK info = %d ' % info)
    lwork = int(lwork.real)
    # hq holds H in its upper triangle and first subdiagonal; the entries
    # below that, together with tau, encode the Householder reflectors of Q.
    hq, tau, info = gehrd(ba, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gehrd '
                         '(hessenberg)' % -info)
    # Discard the reflector storage below the first subdiagonal.
    h = numpy.triu(hq, -1)
    if not calc_q:
        return h
    # use orghr/unghr to compute q
    orghr, orghr_lwork = get_lapack_funcs(('orghr', 'orghr_lwork'), (a1,))
    lwork, info = orghr_lwork(n, lo=lo, hi=hi)
    if info != 0:
        raise ValueError('failed to compute internal orghr work array size. '
                         'LAPACK info = %d ' % info)
    lwork = int(lwork.real)
    # Accumulate the reflectors stored in hq/tau into an explicit Q.
    q, info = orghr(a=hq, tau=tau, lo=lo, hi=hi, lwork=lwork, overwrite_a=1)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal orghr '
                         '(hessenberg)' % -info)
    return h, q
| bsd-3-clause |
waytai/odoo | addons/website_event_track/models/event.py | 300 | 8344 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.addons.website.models.website import slug
import pytz
# Free-form tag attached to an event track (see event_track.tag_ids).
class event_track_tag(osv.osv):
    _name = "event.track.tag"
    _order = 'name'
    _columns = {
        'name': fields.char('Event Track Tag', translate=True)
    }
# Free-form tag attached to an event itself (see event_event.tag_ids).
class event_tag(osv.osv):
    _name = "event.tag"
    _order = 'name'
    _columns = {
        'name': fields.char('Event Tag', translate=True)
    }
#
# Tracks: conferences
#
# Workflow stage of a track; event_track groups its kanban by this model
# (see event_track._read_group_stage_ids), ordered by 'sequence'.
class event_track_stage(osv.osv):
    _name = "event.track.stage"
    _order = 'sequence'
    _columns = {
        'name': fields.char('Track Stage', translate=True),
        'sequence': fields.integer('Sequence')
    }
    _defaults = {
        'sequence': 0
    }
# Physical room/location where a track takes place (event_track.location_id).
class event_track_location(osv.osv):
    _name = "event.track.location"
    _columns = {
        'name': fields.char('Track Rooms')
    }
class event_track(osv.osv):
    """A talk/session ("track") belonging to an event, publishable on the
    website and managed through kanban stages."""
    _name = "event.track"
    _description = 'Event Tracks'
    _order = 'priority, date'
    _inherit = ['mail.thread', 'ir.needaction_mixin', 'website.seo.metadata']
    def _website_url(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: public website URL of each track."""
        res = dict.fromkeys(ids, '')
        for track in self.browse(cr, uid, ids, context=context):
            res[track.id] = "/event/%s/track/%s" % (slug(track.event_id), slug(track))
        return res
    _columns = {
        'name': fields.char('Track Title', required=True, translate=True),
        'user_id': fields.many2one('res.users', 'Responsible'),
        'speaker_ids': fields.many2many('res.partner', string='Speakers'),
        'tag_ids': fields.many2many('event.track.tag', string='Tags'),
        'stage_id': fields.many2one('event.track.stage', 'Stage'),
        'description': fields.html('Track Description', translate=True),
        'date': fields.datetime('Track Date'),
        'duration': fields.float('Duration', digits=(16, 2)),
        'location_id': fields.many2one('event.track.location', 'Location'),
        'event_id': fields.many2one('event.event', 'Event', required=True),
        'color': fields.integer('Color Index'),
        'priority': fields.selection([('3', 'Low'), ('2', 'Medium (*)'), ('1', 'High (**)'), ('0', 'Highest (***)')], 'Priority', required=True),
        'website_published': fields.boolean('Available in the website', copy=False),
        'website_url': fields.function(_website_url, string="Website url", type="char"),
        'image': fields.related('speaker_ids', 'image', type='binary', readonly=True)
    }
    def set_priority(self, cr, uid, ids, priority, context=None):
        """Write `priority` on the given tracks (kanban quick action).
        Fixed: `context` used to default to the mutable ``{}``, which is
        shared between calls in Python; ``None`` is the safe convention.
        """
        return self.write(cr, uid, ids, {'priority': priority})
    def _default_stage_id(self, cr, uid, context=None):
        """Default stage: the first event.track.stage found (that model is
        ordered by 'sequence'), or False when none exists.
        Fixed: mutable default argument ``context={}`` replaced by ``None``.
        """
        stage_obj = self.pool.get('event.track.stage')
        ids = stage_obj.search(cr, uid, [], context=context)
        return ids and ids[0] or False
    _defaults = {
        'user_id': lambda self, cr, uid, ctx: uid,
        'website_published': lambda self, cr, uid, ctx: False,
        'duration': lambda *args: 1.5,
        'stage_id': _default_stage_id,
        'priority': '2'
    }
    def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
        """Return every stage so kanban grouping always shows all columns."""
        stage_obj = self.pool.get('event.track.stage')
        result = stage_obj.name_search(cr, uid, '', context=context)
        return result, {}
    _group_by_full = {
        'stage_id': _read_group_stage_ids,
    }
#
# Events
#
class event_event(osv.osv):
    """Extension of event.event adding tracks, sponsors and a news blog."""
    _inherit = "event.event"
    def _list_tz(self, cr, uid, context=None):
        # put POSIX 'Etc/*' entries at the end to avoid confusing users - see bug 1086728
        return [(tz, tz) for tz in sorted(pytz.all_timezones, key=lambda tz: tz if not tz.startswith('Etc/') else '_')]
    def _count_tracks(self, cr, uid, ids, field_name, arg, context=None):
        """Function field: number of tracks per event."""
        return {
            event.id: len(event.track_ids)
            for event in self.browse(cr, uid, ids, context=context)
        }
    def _get_tracks_tag_ids(self, cr, uid, ids, field_names, arg=None, context=None):
        """Function field: distinct tag ids used by the tracks of each event."""
        res = dict((res_id, []) for res_id in ids)
        for event in self.browse(cr, uid, ids, context=context):
            for track in event.track_ids:
                res[event.id] += [tag.id for tag in track.tag_ids]
            res[event.id] = list(set(res[event.id]))
        return res
    _columns = {
        'tag_ids': fields.many2many('event.tag', string='Tags'),
        'track_ids': fields.one2many('event.track', 'event_id', 'Tracks', copy=True),
        'sponsor_ids': fields.one2many('event.sponsor', 'event_id', 'Sponsorships', copy=True),
        'blog_id': fields.many2one('blog.blog', 'Event Blog'),
        'show_track_proposal': fields.boolean('Talks Proposals'),
        'show_tracks': fields.boolean('Multiple Tracks'),
        'show_blog': fields.boolean('News'),
        'count_tracks': fields.function(_count_tracks, type='integer', string='Tracks'),
        'tracks_tag_ids': fields.function(_get_tracks_tag_ids, type='one2many', relation='event.track.tag', string='Tags of Tracks'),
        'allowed_track_tag_ids': fields.many2many('event.track.tag', string='Accepted Tags', help="List of available tags for track proposals."),
        'timezone_of_event': fields.selection(_list_tz, 'Event Timezone', size=64),
    }
    _defaults = {
        'show_track_proposal': False,
        'show_tracks': False,
        'show_blog': False,
        'timezone_of_event': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).tz,
    }
    def _get_new_menu_pages(self, cr, uid, event, context=None):
        """Extend the website menu entries generated for `event`."""
        context = context or {}
        result = super(event_event, self)._get_new_menu_pages(cr, uid, event, context=context)
        if event.show_tracks:
            result.append((_('Talks'), '/event/%s/track' % slug(event)))
            result.append((_('Agenda'), '/event/%s/agenda' % slug(event)))
        if event.blog_id:
            # Fixed: 'event.blog_ig' was a typo for 'blog_id' (AttributeError
            # at runtime), and the slug was appended without a '/' separator.
            result.append((_('News'), '/blogpost/%s' % slug(event.blog_id)))
        if event.show_track_proposal:
            result.append((_('Talk Proposals'), '/event/%s/track_proposal' % slug(event)))
        return result
#
# Sponsors
#
# Sponsorship level for an event sponsor, ordered by 'sequence'
# (event_sponsors inherits this sequence via a related field).
class event_sponsors_type(osv.osv):
    _name = "event.sponsor.type"
    _order = "sequence"
    _columns = {
        "name": fields.char('Sponsor Type', required=True, translate=True),
        "sequence": fields.integer('Sequence')
    }
# A partner sponsoring an event at a given sponsorship level.
class event_sponsors(osv.osv):
    _name = "event.sponsor"
    _order = "sequence"
    _columns = {
        'event_id': fields.many2one('event.event', 'Event', required=True),
        'sponsor_type_id': fields.many2one('event.sponsor.type', 'Sponsoring Type', required=True),
        'partner_id': fields.many2one('res.partner', 'Sponsor/Customer', required=True),
        'url': fields.text('Sponsor Website'),
        # sequence is mirrored from the sponsor type so _order works here
        'sequence': fields.related('sponsor_type_id', 'sequence', string='Sequence', store=True),
        'image_medium': fields.related('partner_id', 'image_medium', string='Logo', type='binary')
    }
    def has_access_to_partner(self, cr, uid, ids, context=None):
        """Return True when the current user can see the partner of every
        sponsor in `ids`: a counted search on res.partner (which applies
        access rules) must find as many partners as were requested."""
        partner_ids = [sponsor.partner_id.id for sponsor in self.browse(cr, uid, ids, context=context)]
        return len(partner_ids) == self.pool.get("res.partner").search(cr, uid, [("id", "in", partner_ids)], count=True, context=context)
| agpl-3.0 |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/legacy_seq2seq/__init__.py | 165 | 2433 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Deprecated library for creating sequence-to-sequence models in TensorFlow.
@@attention_decoder
@@basic_rnn_seq2seq
@@embedding_attention_decoder
@@embedding_attention_seq2seq
@@embedding_rnn_decoder
@@embedding_rnn_seq2seq
@@embedding_tied_rnn_seq2seq
@@model_with_buckets
@@one2many_rnn_seq2seq
@@rnn_decoder
@@sequence_loss
@@sequence_loss_by_example
@@tied_rnn_seq2seq
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import basic_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_attention_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import embedding_tied_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import model_with_buckets
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import one2many_rnn_seq2seq
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import rnn_decoder
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import sequence_loss_by_example
from tensorflow.contrib.legacy_seq2seq.python.ops.seq2seq import tied_rnn_seq2seq
from tensorflow.python.util.all_util import remove_undocumented
# No extra symbols are exempt from the documentation requirement; strip
# everything not listed in the module docstring above.
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 |
lukasfenix/namebench | nb_third_party/dns/rdtypes/ANY/NSEC3PARAM.py | 248 | 3161 | # Copyright (C) 2004-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
class NSEC3PARAM(dns.rdata.Rdata):
    """NSEC3PARAM record (NSEC3 hashing parameters, RFC 5155)
    @ivar algorithm: the hash algorithm number
    @type algorithm: int
    @ivar flags: the flags
    @type flags: int
    @ivar iterations: the number of iterations
    @type iterations: int
    @ivar salt: the salt
    @type salt: string"""
    __slots__ = ['algorithm', 'flags', 'iterations', 'salt']
    def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
        super(NSEC3PARAM, self).__init__(rdclass, rdtype)
        self.algorithm = algorithm
        self.flags = flags
        self.iterations = iterations
        self.salt = salt
    def to_text(self, origin=None, relativize=True, **kw):
        # Presentation format: an empty salt is rendered as '-', otherwise
        # the salt is hex-encoded (Python 2 'hex-codec').
        if self.salt == '':
            salt = '-'
        else:
            salt = self.salt.encode('hex-codec')
        return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Inverse of to_text(): read the four fields from the tokenizer.
        algorithm = tok.get_uint8()
        flags = tok.get_uint8()
        iterations = tok.get_uint16()
        salt = tok.get_string()
        if salt == '-':
            salt = ''
        else:
            salt = salt.decode('hex-codec')
        return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
    from_text = classmethod(from_text)
    def to_wire(self, file, compress = None, origin = None):
        # Wire format: algorithm (1 byte), flags (1), iterations (2, big
        # endian), salt length (1), then the raw salt bytes.
        l = len(self.salt)
        file.write(struct.pack("!BBHB", self.algorithm, self.flags,
                               self.iterations, l))
        file.write(self.salt)
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        (algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
                                                             wire[current : current + 5])
        current += 5
        rdlen -= 5
        salt = wire[current : current + slen]
        current += slen
        rdlen -= slen
        # Leftover bytes mean the declared rdata length was inconsistent.
        if rdlen != 0:
            raise dns.exception.FormError
        return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
    from_wire = classmethod(from_wire)
    def _cmp(self, other):
        # Compare via canonical wire encodings (Python 2 style cmp()).
        b1 = cStringIO.StringIO()
        self.to_wire(b1)
        b2 = cStringIO.StringIO()
        other.to_wire(b2)
        return cmp(b1.getvalue(), b2.getvalue())
| apache-2.0 |
j831/zulip | zerver/tests/test_type_debug.py | 7 | 5088 | from __future__ import print_function
import sys
from unittest import TestCase
from six.moves import cStringIO as StringIO
from zerver.lib.type_debug import print_types
from typing import Any, Callable, Dict, Iterable, Tuple, TypeVar
T = TypeVar('T')
def add(x=0, y=0):
    # type: (Any, Any) -> Any
    """Return ``x + y``; works for any pair of operands supporting ``+``."""
    total = x + y
    return total
def to_dict(v=[]):
    # type: (Iterable[Tuple[Any, Any]]) -> Dict[Any, Any]
    """Materialize an iterable of ``(key, value)`` pairs as a dict.
    The mutable default is harmless here: ``v`` is never mutated.
    """
    return {key: value for key, value in v}
class TypesPrintTest(TestCase):
    """Tests for the `print_types` decorator: each check asserts both the
    wrapped function's return value and the human-readable runtime-type
    signature it prints to stdout."""
    # These 2 methods are needed to run tests with our custom test-runner
    def _pre_setup(self):
        # type: () -> None
        pass
    def _post_teardown(self):
        # type: () -> None
        pass
    def check_signature(self, signature, retval, func, *args, **kwargs):
        # type: (str, T, Callable[..., T], *Any, **Any) -> None
        """
        Checks if print_types outputs `signature` when func is called with *args and **kwargs.
        Do not decorate func with print_types before passing into this function.
        func will be decorated with print_types within this function.
        """
        try:
            original_stdout = sys.stdout
            # Capture stdout so the printed signature can be asserted on;
            # always restored in the finally block.
            sys.stdout = StringIO()
            self.assertEqual(retval, print_types(func)(*args, **kwargs))
            self.assertEqual(sys.stdout.getvalue().strip(), signature)
        finally:
            sys.stdout = original_stdout
    def test_empty(self):
        # type: () -> None
        def empty_func():
            # type: () -> None
            pass
        self.check_signature("empty_func() -> None", None, empty_func) # type: ignore # https://github.com/python/mypy/issues/1932
        self.check_signature("<lambda>() -> None", None, (lambda: None)) # type: ignore # https://github.com/python/mypy/issues/1932
    def test_basic(self):
        # type: () -> None
        self.check_signature("add(float, int) -> float",
                             5.0, add, 2.0, 3)
        self.check_signature("add(float, y=int) -> float",
                             5.0, add, 2.0, y=3)
        self.check_signature("add(x=int) -> int", 2, add, x=2)
        self.check_signature("add() -> int", 0, add)
    def test_list(self):
        # type: () -> None
        # '...' in the printed signature marks heterogeneous element types.
        self.check_signature("add([], [str]) -> [str]",
                             ['two'], add, [], ['two'])
        self.check_signature("add([int], [str]) -> [int, ...]",
                             [2, 'two'], add, [2], ['two'])
        self.check_signature("add([int, ...], y=[]) -> [int, ...]",
                             [2, 'two'], add, [2, 'two'], y=[])
    def test_dict(self):
        # type: () -> None
        self.check_signature("to_dict() -> {}", {}, to_dict)
        self.check_signature("to_dict([(int, str)]) -> {int: str}",
                             {2: 'two'}, to_dict, [(2, 'two')])
        self.check_signature("to_dict(((int, str),)) -> {int: str}",
                             {2: 'two'}, to_dict, ((2, 'two'),))
        self.check_signature("to_dict([(int, str), ...]) -> {int: str, ...}",
                             {1: 'one', 2: 'two'}, to_dict, [(1, 'one'), (2, 'two')])
    def test_tuple(self):
        # type: () -> None
        self.check_signature("add((), ()) -> ()",
                             (), add, (), ())
        self.check_signature("add((int,), (str,)) -> (int, str)",
                             (1, 'one'), add, (1,), ('one',))
        self.check_signature("add(((),), ((),)) -> ((), ())",
                             ((), ()), add, ((),), ((),))
    def test_class(self):
        # type: () -> None
        # User-defined classes are printed by their class name.
        class A(object):
            pass
        class B(str):
            pass
        self.check_signature("<lambda>(A) -> str", 'A', (lambda x: x.__class__.__name__), A())
        self.check_signature("<lambda>(B) -> int", 5, (lambda x: len(x)), B("hello"))
    def test_sequence(self):
        # type: () -> None
        # Sequence subclasses are printed as ClassName([...]) for arguments.
        class A(list):
            pass
        class B(list):
            pass
        self.check_signature("add(A([]), B([str])) -> [str]",
                             ['two'], add, A([]), B(['two']))
        self.check_signature("add(A([int]), B([str])) -> [int, ...]",
                             [2, 'two'], add, A([2]), B(['two']))
        self.check_signature("add(A([int, ...]), y=B([])) -> [int, ...]",
                             [2, 'two'], add, A([2, 'two']), y=B([]))
    def test_mapping(self):
        # type: () -> None
        # Mapping subclasses are printed as ClassName([(k, v), ...]).
        class A(dict):
            pass
        def to_A(v=[]):
            # type: (Iterable[Tuple[Any, Any]]) -> A
            return A(v)
        self.check_signature("to_A() -> A([])", A(()), to_A)
        self.check_signature("to_A([(int, str)]) -> A([(int, str)])",
                             {2: 'two'}, to_A, [(2, 'two')])
        self.check_signature("to_A([(int, str), ...]) -> A([(int, str), ...])",
                             {1: 'one', 2: 'two'}, to_A, [(1, 'one'), (2, 'two')])
        self.check_signature("to_A(((int, str), (int, str))) -> A([(int, str), ...])",
                             {1: 'one', 2: 'two'}, to_A, ((1, 'one'), (2, 'two')))
| apache-2.0 |
egenchen/shadowsocks | shadowsocks/manager.py | 925 | 9692 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import errno
import traceback
import socket
import logging
import json
import collections
from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell
# Size of reads from the control socket; also bounds a single report
# datagram.  NOTE(review): 1506 looks Ethernet-MTU-derived -- confirm.
BUF_SIZE = 1506
# Flush statistics after at most this many ports so each 'stat:' report
# fits in one UDP packet (see Manager.handle_periodic).
STAT_SEND_LIMIT = 100
class Manager(object):
    """Multi-port shadowsocks server controller.
    Binds a datagram "manager" socket (UDP host:port or UNIX path), runs
    one TCP+UDP relay pair per configured port, answers add/remove/ping
    commands, and periodically reports per-port traffic statistics back to
    the most recent control client.
    """
    def __init__(self, config):
        # config: dict parsed from the server configuration; must contain
        # 'manager_address' and 'port_password'.
        self._config = config
        self._relays = {} # (tcprelay, udprelay)
        self._loop = eventloop.EventLoop()
        self._dns_resolver = asyncdns.DNSResolver()
        self._dns_resolver.add_to_loop(self._loop)
        # port -> bytes transferred since the last periodic report
        self._statistics = collections.defaultdict(int)
        self._control_client_addr = None
        try:
            manager_address = config['manager_address']
            if ':' in manager_address:
                # host:port form -> UDP socket; getaddrinfo picks the family
                addr = manager_address.rsplit(':', 1)
                addr = addr[0], int(addr[1])
                addrs = socket.getaddrinfo(addr[0], addr[1])
                if addrs:
                    family = addrs[0][0]
                else:
                    logging.error('invalid address: %s', manager_address)
                    exit(1)
            else:
                # plain path -> UNIX domain datagram socket
                addr = manager_address
                family = socket.AF_UNIX
            self._control_socket = socket.socket(family,
                                                 socket.SOCK_DGRAM)
            self._control_socket.bind(addr)
            self._control_socket.setblocking(False)
        except (OSError, IOError) as e:
            logging.error(e)
            logging.error('can not bind to manager address')
            exit(1)
        self._loop.add(self._control_socket,
                       eventloop.POLL_IN, self)
        self._loop.add_periodic(self.handle_periodic)
        port_password = config['port_password']
        del config['port_password']
        for port, password in port_password.items():
            # each port gets its own copy of the config with its password
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            self.add_port(a_config)
    def add_port(self, config):
        """Start a TCP+UDP relay pair for config['server_port'] (no-op with
        an error log if the port is already being served)."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.error("server already exists at %s:%d" % (config['server'],
                                                              port))
            return
        logging.info("adding server at %s:%d" % (config['server'], port))
        t = tcprelay.TCPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        u = udprelay.UDPRelay(config, self._dns_resolver, False,
                              self.stat_callback)
        t.add_to_loop(self._loop)
        u.add_to_loop(self._loop)
        self._relays[port] = (t, u)
    def remove_port(self, config):
        """Stop and discard the relay pair serving config['server_port']."""
        port = int(config['server_port'])
        servers = self._relays.get(port, None)
        if servers:
            logging.info("removing server at %s:%d" % (config['server'], port))
            t, u = servers
            t.close(next_tick=False)
            u.close(next_tick=False)
            del self._relays[port]
        else:
            logging.error("server not exist at %s:%d" % (config['server'],
                                                         port))
    def handle_event(self, sock, fd, event):
        """Event-loop callback: process one datagram from the control
        socket (add/remove/ping); remembers the sender so replies and
        periodic statistics can be sent back to it."""
        if sock == self._control_socket and event == eventloop.POLL_IN:
            data, self._control_client_addr = sock.recvfrom(BUF_SIZE)
            parsed = self._parse_command(data)
            if parsed:
                command, config = parsed
                a_config = self._config.copy()
                if config:
                    # let the command override the configuration file
                    a_config.update(config)
                if 'server_port' not in a_config:
                    logging.error('can not find server_port in config')
                else:
                    if command == 'add':
                        self.add_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'remove':
                        self.remove_port(a_config)
                        self._send_control_data(b'ok')
                    elif command == 'ping':
                        self._send_control_data(b'pong')
                    else:
                        logging.error('unknown command %s', command)
    def _parse_command(self, data):
        """Split a control datagram into (command, config-dict-or-None);
        returns None when the JSON payload is malformed."""
        # commands:
        # add: {"server_port": 8000, "password": "foobar"}
        # remove: {"server_port": 8000"}
        data = common.to_str(data)
        parts = data.split(':', 1)
        if len(parts) < 2:
            # bare command without a JSON payload (e.g. 'ping')
            return data, None
        command, config_json = parts
        try:
            config = shell.parse_json_in_str(config_json)
            return command, config
        except Exception as e:
            logging.error(e)
            return None
    def stat_callback(self, port, data_len):
        """Relay callback: accumulate bytes transferred for `port`."""
        self._statistics[port] += data_len
    def handle_periodic(self):
        """Periodic tick: report accumulated statistics to the control
        client in chunks of at most STAT_SEND_LIMIT ports, then reset."""
        r = {}
        i = 0
        def send_data(data_dict):
            if data_dict:
                # use compact JSON format (without space)
                data = common.to_bytes(json.dumps(data_dict,
                                                  separators=(',', ':')))
                self._send_control_data(b'stat: ' + data)
        for k, v in self._statistics.items():
            r[k] = v
            i += 1
            # split the data into segments that fit in UDP packets
            if i >= STAT_SEND_LIMIT:
                send_data(r)
                r.clear()
        send_data(r)
        self._statistics.clear()
    def _send_control_data(self, data):
        """Best-effort send of `data` to the last known control client;
        transient socket errors are swallowed, others logged."""
        if self._control_client_addr:
            try:
                self._control_socket.sendto(data, self._control_client_addr)
            except (socket.error, OSError, IOError) as e:
                error_no = eventloop.errno_from_exception(e)
                if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                                errno.EWOULDBLOCK):
                    # would-block conditions are expected on a non-blocking
                    # socket; silently drop this report
                    return
                else:
                    shell.print_exception(e)
                    if self._config['verbose']:
                        traceback.print_exc()
    def run(self):
        """Run the event loop (blocks until the loop is stopped)."""
        self._loop.run()
def run(config):
    """Module entry point: build a Manager from `config` and run its
    event loop (blocking)."""
    Manager(config).run()
def test():
    """Self-test: start a Manager in a background thread, then exercise the
    control protocol (add/remove) and the TCP/UDP statistics reporting over
    real local sockets.  Raises AssertionError on failure.
    Fixed: after the UDP statistics probe, ``tcp_cli.close()`` was called a
    second time where ``udp_cli.close()`` was clearly intended, leaking the
    UDP socket.
    """
    import time
    import threading
    import struct
    from shadowsocks import encrypt
    logging.basicConfig(level=5,
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    # the server thread publishes its Manager instance through this list
    enc = []
    eventloop.TIMEOUT_PRECISION = 1
    def run_server():
        config = {
            'server': '127.0.0.1',
            'local_port': 1081,
            'port_password': {
                '8381': 'foobar1',
                '8382': 'foobar2'
            },
            'method': 'aes-256-cfb',
            'manager_address': '127.0.0.1:6001',
            'timeout': 60,
            'fast_open': False,
            'verbose': 2
        }
        manager = Manager(config)
        enc.append(manager)
        manager.run()
    t = threading.Thread(target=run_server)
    t.start()
    time.sleep(1)
    manager = enc[0]
    cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    cli.connect(('127.0.0.1', 6001))
    # test add and remove
    time.sleep(1)
    cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}')
    time.sleep(1)
    assert 7001 in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    cli.send(b'remove: {"server_port":8381}')
    time.sleep(1)
    assert 8381 not in manager._relays
    data, addr = cli.recvfrom(1506)
    assert b'ok' in data
    logging.info('add and remove test passed')
    # test statistics for TCP
    header = common.pack_addr(b'google.com') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1,
                               header + b'GET /\r\n\r\n')
    tcp_cli = socket.socket()
    tcp_cli.connect(('127.0.0.1', 7001))
    tcp_cli.send(data)
    tcp_cli.recv(4096)
    tcp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = shell.parse_json_in_str(data)
    assert '7001' in stats
    logging.info('TCP statistics test passed')
    # test statistics for UDP
    header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80)
    data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1,
                               header + b'test')
    udp_cli = socket.socket(type=socket.SOCK_DGRAM)
    udp_cli.sendto(data, ('127.0.0.1', 8382))
    udp_cli.close()
    data, addr = cli.recvfrom(1506)
    data = common.to_str(data)
    assert data.startswith('stat: ')
    data = data.split('stat:')[1]
    stats = json.loads(data)
    assert '8382' in stats
    logging.info('UDP statistics test passed')
    manager._loop.stop()
    t.join()
if __name__ == '__main__':
    # Run the networked self-test only when executed directly.
    test()
| apache-2.0 |
huiyi1990/grab | docs/ru/conf.py | 13 | 7081 | # -*- coding: utf-8 -*-
#
# Grab documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 9 11:04:59 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('_themes'))
sys.path.insert(0, ROOT)
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Grab'
copyright = u'2011, Grigoriy Petukhov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
import grab
version = grab.__version__
release = grab.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {
#'index_logo': '',
#}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Grabdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
# NOTE: author name fixed to match the `copyright` value above
# (was misspelled 'Grigority Petukhov').
latex_documents = [
    ('index', 'Grab.tex', u'Grab Documentation',
     u'Grigoriy Petukhov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'grab', u'Grab Documentation',
     [u'Grigoriy Petukhov'], 1)
]
| mit |
NelisVerhoef/scikit-learn | examples/neural_networks/plot_rbm_logistic_classification.py | 258 | 4609 | """
==============================================================
Restricted Boltzmann Machine features for digit classification
==============================================================
For greyscale image data where pixel values can be interpreted as degrees of
blackness on a white background, like handwritten digit recognition, the
Bernoulli Restricted Boltzmann machine model (:class:`BernoulliRBM
<sklearn.neural_network.BernoulliRBM>`) can perform effective non-linear
feature extraction.
In order to learn good latent representations from a small dataset, we
artificially generate more labeled data by perturbing the training data with
linear shifts of 1 pixel in each direction.
This example shows how to build a classification pipeline with a BernoulliRBM
feature extractor and a :class:`LogisticRegression
<sklearn.linear_model.LogisticRegression>` classifier. The hyperparameters
of the entire model (learning rate, hidden layer size, regularization)
were optimized by grid search, but the search is not reproduced here because
of runtime constraints.
Logistic regression on raw pixel values is presented for comparison. The
example shows that the features extracted by the BernoulliRBM help improve the
classification accuracy.
"""
from __future__ import print_function
print(__doc__)
# Authors: Yann N. Dauphin, Vlad Niculae, Gabriel Synnaeve
# License: BSD
import numpy as np
import matplotlib.pyplot as plt
from scipy.ndimage import convolve
from sklearn import linear_model, datasets, metrics
from sklearn.cross_validation import train_test_split
from sklearn.neural_network import BernoulliRBM
from sklearn.pipeline import Pipeline
###############################################################################
# Setting up
def nudge_dataset(X, Y):
    """Augment a digits dataset with one-pixel translations.

    This produces a dataset 5 times bigger than the original one, by
    moving the 8x8 images in X around by 1px to left, right, down, up.

    Parameters
    ----------
    X : ndarray of shape (n_samples, 64)
        Flattened 8x8 greyscale images.
    Y : ndarray of shape (n_samples,)
        Labels, repeated once for each generated copy.

    Returns
    -------
    X, Y : ndarray
        The original rows followed by the four shifted copies, and the
        labels tiled to match (5 * n_samples rows in total).
    """
    # One 3x3 kernel per direction: convolving with a kernel whose single
    # 1 is offset from the center translates the image by one pixel, with
    # zero padding ('constant' mode) filling the vacated edge.
    direction_vectors = [
        [[0, 1, 0],
         [0, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [1, 0, 0],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 1],
         [0, 0, 0]],

        [[0, 0, 0],
         [0, 0, 0],
         [0, 1, 0]]]

    def shift(x, w):
        # Reshape the flat 64-vector to 8x8, shift via convolution, then
        # flatten back so the result fits the (n_samples, 64) layout.
        return convolve(x.reshape((8, 8)), mode='constant', weights=w).ravel()

    X = np.concatenate([X] +
                       [np.apply_along_axis(shift, 1, X, vector)
                        for vector in direction_vectors])
    # Labels are identical for each shifted copy of the dataset.
    Y = np.concatenate([Y] * 5, axis=0)
    return X, Y
# Load Data
digits = datasets.load_digits()
X = np.asarray(digits.data, 'float32')
# Expand the dataset 5x with one-pixel shifts of each image.
X, Y = nudge_dataset(X, digits.target)
# The epsilon in the denominator guards against division by zero for
# pixels that are constant across the dataset.
X = (X - np.min(X, 0)) / (np.max(X, 0) + 0.0001)  # 0-1 scaling

X_train, X_test, Y_train, Y_test = train_test_split(X, Y,
                                                    test_size=0.2,
                                                    random_state=0)

# Models we will use
logistic = linear_model.LogisticRegression()
rbm = BernoulliRBM(random_state=0, verbose=True)

# RBM feature extraction feeding a logistic-regression classifier.
classifier = Pipeline(steps=[('rbm', rbm), ('logistic', logistic)])

###############################################################################
# Training

# Hyper-parameters. These were set by cross-validation,
# using a GridSearchCV. Here we are not performing cross-validation to
# save time.
rbm.learning_rate = 0.06
rbm.n_iter = 20
# More components tend to give better prediction performance, but larger
# fitting time
rbm.n_components = 100
logistic.C = 6000.0

# Training RBM-Logistic Pipeline
classifier.fit(X_train, Y_train)

# Training Logistic regression
# (separate raw-pixel baseline; its C was also tuned by grid search per
# the module docstring)
logistic_classifier = linear_model.LogisticRegression(C=100.0)
logistic_classifier.fit(X_train, Y_train)

###############################################################################
# Evaluation

print()
print("Logistic regression using RBM features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        classifier.predict(X_test))))

print("Logistic regression using raw pixel features:\n%s\n" % (
    metrics.classification_report(
        Y_test,
        logistic_classifier.predict(X_test))))

###############################################################################
# Plotting

# Show each learned RBM component as an 8x8 greyscale image in a 10x10 grid.
plt.figure(figsize=(4.2, 4))
for i, comp in enumerate(rbm.components_):
    plt.subplot(10, 10, i + 1)
    plt.imshow(comp.reshape((8, 8)), cmap=plt.cm.gray_r,
               interpolation='nearest')
    plt.xticks(())
    plt.yticks(())
plt.suptitle('100 components extracted by RBM', fontsize=16)
plt.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)

plt.show()
| bsd-3-clause |
le9i0nx/ansible | lib/ansible/utils/module_docs_fragments/ovirt_facts.py | 44 | 3242 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
class ModuleDocFragment(object):
    """Shared documentation fragment for the oVirt facts modules.

    Ansible merges the DOCUMENTATION string below into any module that
    declares this fragment, so the common ``auth``, ``fetch_nested`` and
    ``nested_attributes`` options only need to be documented once.
    """

    # facts standard oVirt documentation fragment
    DOCUMENTATION = '''
options:
    fetch_nested:
        description:
            - "If I(True) the module will fetch additional data from the API."
            - "It will fetch IDs of the VMs disks, snapshots, etc. User can configure to fetch other
               attributes of the nested entities by specifying C(nested_attributes)."
        version_added: "2.3"
    nested_attributes:
        description:
            - "Specifies list of the attributes which should be fetched from the API."
            - "This parameter apply only when C(fetch_nested) is I(true)."
        version_added: "2.3"
    auth:
        required: True
        description:
            - "Dictionary with values needed to create HTTP/HTTPS connection to oVirt:"
            - C(username)[I(required)] - The name of the user, something like I(admin@internal).
              Default value is set by I(OVIRT_USERNAME) environment variable.
            - "C(password)[I(required)] - The password of the user. Default value is set by I(OVIRT_PASSWORD) environment variable."
            - "C(url)[I(required)] - A string containing the base URL of the server, usually
               something like `I(https://server.example.com/ovirt-engine/api)`. Default value is set by I(OVIRT_URL) environment variable."
            - "C(token) - Token to be used instead of login with username/password. Default value is set by I(OVIRT_TOKEN) environment variable."
            - "C(insecure) - A boolean flag that indicates if the server TLS
               certificate and host name should be checked."
            - "C(ca_file) - A PEM file containing the trusted CA certificates. The
               certificate presented by the server will be verified using these CA
               certificates. If `C(ca_file)` parameter is not set, system wide
               CA certificate store is used. Default value is set by I(OVIRT_CAFILE) environment variable."
            - "C(kerberos) - A boolean flag indicating if Kerberos authentication
               should be used instead of the default basic authentication."
            - "C(headers) - Dictionary of HTTP headers to be added to each API call."
requirements:
  - python >= 2.7
  - ovirt-engine-sdk-python >= 4.0.0
notes:
  - "In order to use this module you have to install oVirt Python SDK.
     To ensure it's installed with correct version you can create the following task:
     pip: name=ovirt-engine-sdk-python version=4.0.0"
'''
| gpl-3.0 |
40223150/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/xml/dom/__init__.py | 873 | 4019 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""
    __slots__ = ()

    # DOM implementations may use this as a base class for their own
    # Node implementations. If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation. Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.

    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
# ExceptionCode values from the W3C DOM recommendation.  Each concrete
# DOMException subclass defined below carries one of these as its ``code``
# class attribute.
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.

    Exceptions with specific codes are specializations of this class;
    only those concrete subclasses may be instantiated.
    """

    def __init__(self, *args, **kw):
        # Guard against direct use of the abstract base: a usable DOM
        # exception must be one of the subclasses that define ``code``.
        if type(self) is DOMException:
            raise RuntimeError(
                "DOMException should not be instantiated directly")
        super().__init__(*args, **kw)

    def _get_code(self):
        # Expose the subclass's ``code`` class attribute through a method,
        # for use as a property getter by DOM implementations.
        return self.code
# One concrete DOMException subclass per ExceptionCode constant; the
# ``code`` class attribute identifies which W3C DOM error the exception
# represents (and is what DOMException._get_code returns).
class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR

class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR

class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR

class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR

class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR

class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR

class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR

class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR

class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR

class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR

class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR

class SyntaxErr(DOMException):
    code = SYNTAX_ERR

class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR

class NamespaceErr(DOMException):
    code = NAMESPACE_ERR

class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR

class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""

    # Based on DOM Level 3 (WD 9 April 2002); passed as the ``operation``
    # argument when user-data callbacks are invoked.

    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
# Well-known namespace URIs used throughout the DOM implementation.
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
# Sentinels meaning "no namespace" / "no prefix".
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from .domreg import getDOMImplementation, registerDOMImplementation
| gpl-3.0 |
GauravSahu/odoo | addons/product/wizard/product_price.py | 380 | 2254 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class product_price_list(osv.osv_memory):
    """Transient wizard asking for a pricelist and up to five quantities,
    then launching the product pricelist report for those quantities."""
    _name = 'product.price_list'
    _description = 'Price List'

    _columns = {
        # Pricelist whose rules will be applied in the report.
        'price_list': fields.many2one('product.pricelist', 'PriceList', required=True),
        # Quantity columns for the report; presumably a value of 0 means the
        # column is unused -- confirm against report_pricelist.
        'qty1': fields.integer('Quantity-1'),
        'qty2': fields.integer('Quantity-2'),
        'qty3': fields.integer('Quantity-3'),
        'qty4': fields.integer('Quantity-4'),
        'qty5': fields.integer('Quantity-5'),
    }
    _defaults = {
        'qty1': 1,
        'qty2': 5,
        'qty3': 10,
        'qty4': 0,
        'qty5': 0,
    }

    def print_report(self, cr, uid, ids, context=None):
        """Collect the wizard values and launch the pricelist report.

        Reads the selected pricelist and the five quantity fields from the
        wizard record and forwards them, together with the active ids from
        the context, to the ``product.report_pricelist`` report.

        @return: report action dictionary produced by the report pool
        """
        if context is None:
            context = {}
        datas = {'ids': context.get('active_ids', [])}
        res = self.read(cr, uid, ids, ['price_list','qty1', 'qty2','qty3','qty4','qty5'], context=context)
        res = res and res[0] or {}
        # read() returns the many2one as a pair; keep only the first
        # element (presumably the database id -- standard osv behaviour).
        res['price_list'] = res['price_list'][0]
        datas['form'] = res
        return self.pool['report'].get_action(cr, uid, [], 'product.report_pricelist', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
boundarydevices/android_external_chromium_org | tools/telemetry/telemetry/results/buildbot_page_measurement_results_unittest.py | 9 | 19250 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
from telemetry import perf_tests_helper
from telemetry.page import page_set
from telemetry.value import list_of_scalar_values
from telemetry.value import scalar
from telemetry.results import base_test_results_unittest
from telemetry.results import buildbot_page_measurement_results
def _MakePageSet():
  # Build a three-page PageSet rooted at this test's directory; the URLs
  # double as the sanitized trace names (http___www.foo.com_ etc.) checked
  # in the expected output of the tests below.
  ps = page_set.PageSet(file_path=os.path.dirname(__file__))
  ps.AddPageWithDefaultRunNavigate('http://www.foo.com/')
  ps.AddPageWithDefaultRunNavigate('http://www.bar.com/')
  ps.AddPageWithDefaultRunNavigate('http://www.baz.com/')
  return ps
class SummarySavingPageMeasurementResults(
    buildbot_page_measurement_results.BuildbotPageMeasurementResults):
  """Test double that captures perf-result lines instead of printing them.

  Each formatted RESULT/HISTOGRAM line is appended to self.results so the
  tests below can compare the output against expected buildbot log lines.
  """

  def __init__(self, trace_tag=''):
    super(SummarySavingPageMeasurementResults, self).__init__(
        None, trace_tag=trace_tag)
    self.results = []

  def _PrintPerfResult(self, *args):
    # With print_to_stdout=False, PrintPerfResult returns the formatted
    # line rather than writing it to stdout.
    res = perf_tests_helper.PrintPerfResult(*args, print_to_stdout=False)
    self.results.append(res)
class BuildbotPageMeasurementResultsTest(
    base_test_results_unittest.BaseTestResultsUnittest):
  """Tests the exact buildbot RESULT/HISTOGRAM lines produced for the log.

  Each test drives a SummarySavingPageMeasurementResults through a
  measurement scenario (per-page values, repeats, failures, summary
  values, trace tags) and compares the captured lines -- including the
  per-page lines, the starred overall lines, and the num_failed /
  num_errored counters -- against the expected output.
  """

  def test_basic_summary(self):
    """One value per page: per-page lines plus a starred overall summary."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= 7 seconds',
                'RESULT a: http___www.foo.com_= 3 seconds',
                '*RESULT a: a= [3,7] seconds\nAvg a: 5.000000seconds\n' +
                'Sd a: 2.828427seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_basic_summary_with_only_one_page(self):
    """A single page collapses the starred summary to a scalar line."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.PrintSummary()
    expected = ['*RESULT a: a= 3 seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_basic_summary_nonuniform_results(self):
    """Metrics missing on some pages are still summarized over the pages
    that did report them."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.Add('b', 'seconds', 10)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.Add('b', 'seconds', 10)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.WillMeasurePage(test_page_set.pages[2])
    measurement_results.Add('a', 'seconds', 7)
    # Note, page[2] does not report a 'b' metric.
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[2])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= 3 seconds',
                'RESULT a: http___www.baz.com_= 7 seconds',
                'RESULT a: http___www.foo.com_= 3 seconds',
                '*RESULT a: a= [3,3,7] seconds\nAvg a: 4.333333seconds\n' +
                'Sd a: 2.309401seconds',
                'RESULT b: http___www.bar.com_= 10 seconds',
                'RESULT b: http___www.foo.com_= 10 seconds',
                '*RESULT b: b= [10,10] seconds\nAvg b: 10.000000seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_basic_summary_pass_and_fail_page(self):
    """If a page failed, only print summary for individual pages."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddFailureMessage(test_page_set.pages[0], 'message')

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= 7 seconds',
                'RESULT a: http___www.foo.com_= 3 seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 1 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pageset_one_iteration_one_page_fails(self):
    """Page fails on one iteration, no averaged results should print."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddFailureMessage(test_page_set.pages[1], 'message')

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a: 7.500000seconds\n' +
                'Sd a: 0.707107seconds',
                'RESULT a: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a: 3.500000seconds\n' +
                'Sd a: 0.707107seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 1 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pageset_one_iteration_one_page_error(self):
    """Page error on one iteration, no averaged results should print."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddErrorMessage(test_page_set.pages[1], 'message')

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a: 7.500000seconds\n' +
                'Sd a: 0.707107seconds',
                'RESULT a: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a: 3.500000seconds\n' +
                'Sd a: 0.707107seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 1 count']
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pageset(self):
    """Two clean runs over the page set: per-page averages plus a starred
    overall summary across all samples."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a: 7.500000seconds\n' +
                'Sd a: 0.707107seconds',
                'RESULT a: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a: 3.500000seconds\n' +
                'Sd a: 0.707107seconds',
                '*RESULT a: a= [3,7,4,8] seconds\n' +
                'Avg a: 5.500000seconds\n' +
                'Sd a: 2.380476seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count'
                ]
    self.assertEquals(expected, measurement_results.results)

  def test_repeated_pages(self):
    """Back-to-back repeats of each page produce the same summaries as
    interleaved repeats."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'seconds', 4)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 7)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'seconds', 8)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    expected = ['RESULT a: http___www.bar.com_= [7,8] seconds\n' +
                'Avg a: 7.500000seconds\n' +
                'Sd a: 0.707107seconds',
                'RESULT a: http___www.foo.com_= [3,4] seconds\n' +
                'Avg a: 3.500000seconds\n' +
                'Sd a: 0.707107seconds',
                '*RESULT a: a= [3,4,7,8] seconds\n' +
                'Avg a: 5.500000seconds\n' +
                'Sd a: 2.380476seconds',
                'RESULT telemetry_page_measurement_results: ' +
                'num_failed= 0 count',
                'RESULT telemetry_page_measurement_results: ' +
                'num_errored= 0 count'
                ]
    self.assertEquals(expected, measurement_results.results)

  def test_overall_results_trace_tag(self):
    """Summary values and per-metric summaries get the trace tag suffix."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults(trace_tag='_ref')
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'a', 'seconds', 1))

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 2)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('b', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'c', 'seconds', 4))

    measurement_results.PrintSummary()

    expected = [
        '*RESULT b: b_ref= [2,3] seconds\n' +
        'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
        '*RESULT a: a_ref= 1 seconds',
        '*RESULT c: c_ref= 4 seconds',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_overall_results_page_runs_twice(self):
    """Summary values plus a repeated page: per-page list line and starred
    overall line both appear."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'a', 'seconds', 1))

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 2)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 3)
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.PrintSummary()

    expected = [
        'RESULT b: http___www.foo.com_= [2,3] seconds\n' +
        'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
        '*RESULT b: b= [2,3] seconds\n' +
        'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
        '*RESULT a: a= 1 seconds',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_unimportant_results(self):
    """Unimportant values produce plain RESULT lines with no leading star."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.AddSummaryValue(
        scalar.ScalarValue(None, 'a', 'seconds', 1, important=False))

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', 2, data_type='unimportant')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('b', 'seconds', 3, data_type='unimportant')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()
    self.assertEquals(
        measurement_results.results,
        ['RESULT b: http___www.bar.com_= 3 seconds',
         'RESULT b: http___www.foo.com_= 2 seconds',
         'RESULT b: b= [2,3] seconds\n' +
         'Avg b: 2.500000seconds\nSd b: 0.707107seconds',
         'RESULT a: a= 1 seconds',
         'RESULT telemetry_page_measurement_results: num_failed= 0 count',
         'RESULT telemetry_page_measurement_results: num_errored= 0 count'])

  def test_list_value(self):
    """List-of-scalar values are flattened into the summary statistics."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.AddSummaryValue(
        list_of_scalar_values.ListOfScalarValues(None, 'a', 'seconds', [1, 1]))

    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('b', 'seconds', [2, 2])
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('b', 'seconds', [3, 3])
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()

    expected = [
        'RESULT b: http___www.bar.com_= [3,3] seconds\n' +
        'Avg b: 3.000000seconds',
        'RESULT b: http___www.foo.com_= [2,2] seconds\n' +
        'Avg b: 2.000000seconds',
        '*RESULT b: b= [2,2,3,3] seconds\nAvg b: 2.500000seconds\n' +
        'Sd b: 0.577350seconds',
        '*RESULT a: a= [1,1] seconds\nAvg a: 1.000000seconds',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)

  def test_histogram(self):
    """Histogram values are emitted as HISTOGRAM lines with their JSON
    bucket payload and a derived average."""
    test_page_set = _MakePageSet()

    measurement_results = SummarySavingPageMeasurementResults()
    measurement_results.WillMeasurePage(test_page_set.pages[0])
    measurement_results.Add('a', 'units',
                            '{"buckets": [{"low": 1, "high": 2, "count": 1}]}',
                            data_type='unimportant-histogram')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[0])

    measurement_results.WillMeasurePage(test_page_set.pages[1])
    measurement_results.Add('a', 'units',
                            '{"buckets": [{"low": 2, "high": 3, "count": 1}]}',
                            data_type='unimportant-histogram')
    measurement_results.DidMeasurePage()
    measurement_results.AddSuccess(test_page_set.pages[1])

    measurement_results.PrintSummary()

    expected = [
        'HISTOGRAM a: http___www.bar.com_= ' +
        '{"buckets": [{"low": 2, "high": 3, "count": 1}]} units\n' +
        'Avg a: 2.500000units',
        'HISTOGRAM a: http___www.foo.com_= ' +
        '{"buckets": [{"low": 1, "high": 2, "count": 1}]} units\n' +
        'Avg a: 1.500000units',
        'RESULT telemetry_page_measurement_results: num_failed= 0 count',
        'RESULT telemetry_page_measurement_results: num_errored= 0 count']
    self.assertEquals(expected, measurement_results.results)
| bsd-3-clause |
webmull/phantomjs | src/breakpad/src/tools/gyp/test/generator-output/gyptest-actions.py | 151 | 1953 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies --generator-output= behavior when using actions.
"""
import TestGyp
test = TestGyp.TestGyp()
# All the generated files should go under 'gypfiles'. The source directory
# ('actions') should be untouched.
test.writable(test.workpath('actions'), False)
test.run_gyp('actions.gyp',
'--generator-output=' + test.workpath('gypfiles'),
chdir='actions')
test.writable(test.workpath('actions'), True)
test.relocate('actions', 'relocate/actions')
test.relocate('gypfiles', 'relocate/gypfiles')
test.writable(test.workpath('relocate/actions'), False)
# Some of the action outputs use "pure" relative paths (i.e. without prefixes
# like <(INTERMEDIATE_DIR) or <(PROGRAM_DIR)). Even though we are building under
# 'gypfiles', such outputs will still be created relative to the original .gyp
# sources. Projects probably wouldn't normally do this, since it kind of defeats
# the purpose of '--generator-output', but it is supported behaviour.
test.writable(test.workpath('relocate/actions/build'), True)
test.writable(test.workpath('relocate/actions/subdir1/build'), True)
test.writable(test.workpath('relocate/actions/subdir1/actions-out'), True)
test.writable(test.workpath('relocate/actions/subdir2/build'), True)
test.writable(test.workpath('relocate/actions/subdir2/actions-out'), True)
test.build('actions.gyp', test.ALL, chdir='relocate/gypfiles')
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
if test.format == 'xcode':
chdir = 'relocate/actions/subdir1'
else:
chdir = 'relocate/gypfiles'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/actions/subdir2/actions-out/file.out',
"Hello from make-file.py\n")
test.pass_test()
| bsd-3-clause |
gangadhar-kadam/sapphire_app | stock/doctype/bin/bin.py | 1 | 2522 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd.
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import add_days, cint,flt, nowdate, get_url_to_form, formatdate
from webnotes import msgprint, _
sql = webnotes.conn.sql
import webnotes.defaults
class DocType:
    """Bin controller: cached per-item, per-warehouse stock quantities."""

    def __init__(self, doc, doclist=None):
        # NOTE: the original default was a shared mutable list (doclist=[]);
        # a None sentinel gives every instance its own fresh list.
        self.doc = doc
        self.doclist = doclist if doclist is not None else []

    def validate(self):
        """Fill in stock UOM and mandatory fields, then refresh projected qty."""
        if self.doc.fields.get("__islocal") or not self.doc.stock_uom:
            self.doc.stock_uom = webnotes.conn.get_value('Item', self.doc.item_code, 'stock_uom')

        self.validate_mandatory()

        self.set_projected_qty()

    def validate_mandatory(self):
        """Default every quantity field that is missing or falsy to 0.0."""
        for f in ('actual_qty', 'reserved_qty', 'ordered_qty', 'indented_qty'):
            # fields.get(f) is falsy both when the key is absent and when it
            # holds None/0/'' -- same condition as the original has_key check.
            if not self.doc.fields.get(f):
                self.doc.fields[f] = 0.0

    def set_projected_qty(self):
        """projected = on hand + incoming (ordered/indented/planned) - reserved.

        Shared by validate() and update_qty() so the formula lives in one place.
        """
        self.doc.projected_qty = flt(self.doc.actual_qty) + flt(self.doc.ordered_qty) + \
            flt(self.doc.indented_qty) + flt(self.doc.planned_qty) - flt(self.doc.reserved_qty)

    def update_stock(self, args):
        """Apply quantity deltas from `args`; on an actual stock movement,
        recompute valuation for ledger entries after the posting datetime."""
        self.update_qty(args)

        if args.get("actual_qty"):
            from stock.stock_ledger import update_entries_after

            if not args.get("posting_date"):
                args["posting_date"] = nowdate()

            # update valuation and qty after transaction for post dated entry
            update_entries_after({
                "item_code": self.doc.item_code,
                "warehouse": self.doc.warehouse,
                "posting_date": args.get("posting_date"),
                "posting_time": args.get("posting_time")
            })

    def update_qty(self, args):
        """Add the deltas in `args` to the stored quantities and save."""
        self.doc.actual_qty = flt(self.doc.actual_qty) + flt(args.get("actual_qty"))
        self.doc.ordered_qty = flt(self.doc.ordered_qty) + flt(args.get("ordered_qty"))
        self.doc.reserved_qty = flt(self.doc.reserved_qty) + flt(args.get("reserved_qty"))
        self.doc.indented_qty = flt(self.doc.indented_qty) + flt(args.get("indented_qty"))
        self.doc.planned_qty = flt(self.doc.planned_qty) + flt(args.get("planned_qty"))

        self.set_projected_qty()

        self.doc.save()

    def get_first_sle(self):
        """Return the earliest Stock Ledger Entry for this item/warehouse,
        or None when there is none."""
        sle = sql("""
select * from `tabStock Ledger Entry`
where item_code = %s
and warehouse = %s
order by timestamp(posting_date, posting_time) asc, name asc
limit 1
""", (self.doc.item_code, self.doc.warehouse), as_dict=1)
        return sle and sle[0] or None
olivierdalang/stdm | ui/composer/chart_type_register.py | 1 | 2725 | """
/***************************************************************************
Name : Graph Type UI Registry Classes
Description : Container for registering graph type UI settings.
Date : 16/April/2015
copyright : (C) 2014 by UN-Habitat and implementing partners.
See the accompanying file CONTRIBUTORS.txt in the root
email : stdm@unhabitat.org
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtGui import (
QWidget,
QIcon,
QApplication
)
from .chart_type_editors import (
VerticalBarGraphEditor
)
class ChartTypeUISettings(object):
    """Base class for chart-type UI settings.

    Subclasses describe one chart type (title, icon, editor widget) and add
    themselves to the shared ``registry`` via :meth:`register`.
    """

    # Ordered list of registered ChartTypeUISettings subclasses (shared).
    registry = []

    def __init__(self, parent=None):
        # Parent editor widget that will host the chart-type editor.
        self._parent_editor = parent

    @classmethod
    def register(cls, position=-1):
        """Add this settings class to the registry.

        :param position: Index at which to insert the class. Negative or
            out-of-range values append at the end.
        """
        if position < 0 or position > len(ChartTypeUISettings.registry):
            ChartTypeUISettings.registry.append(cls)
        else:
            # BUG FIX: the original called list.index(position, cls), which
            # *searches* for a value (and raises TypeError for a non-int
            # start); list.insert(position, cls) is what was intended.
            ChartTypeUISettings.registry.insert(position, cls)

    def title(self):
        """
        :return: Display of the graph type.
        :rtype: str
        """
        raise NotImplementedError

    def icon(self):
        """
        :return: Icon depicting the graph type, or None when the subclass
            provides no icon.
        :rtype: QIcon
        """
        return None

    def short_name(self):
        """
        :return: Returns the shortname of the chart type. This should
        match exactly to the one specified in the ChartConfiguration
        subclass.
        :rtype: str
        """
        return ""

    def editor(self):
        """
        Editor for setting the graph series properties.
        """
        raise NotImplementedError
class VerticalBarChartSettings(ChartTypeUISettings):
    """UI settings for the vertical bar chart type."""

    def icon(self):
        # Toolbar/list icon shown for this chart type.
        return QIcon(":/plugins/stdm/images/icons/chart_bar.png")

    def title(self):
        # Translatable display name.
        return QApplication.translate("VerticalBarGraph", "Vertical Bar")

    def editor(self):
        # Editor widget hosted by the parent composer editor.
        return VerticalBarGraphEditor(self._parent_editor)

    def short_name(self):
        # Must match the short name declared by the corresponding
        # ChartConfiguration subclass.
        return "vbar"

# Self-register so this chart type appears in the registry on import.
VerticalBarChartSettings.register()
chatcannon/scipy | scipy/weave/examples/wx_example.py | 100 | 7328 | """ This is taken from the scrolled window example from the demo.
Take a look at the DoDrawing2() method below. The first 6 lines
or so have been translated into C++.
"""
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
import inline_tools
from wxPython.wx import *
class MyCanvas(wxScrolledWindow):
    """Scrolled drawing canvas: renders a fixed demo scene and lets the user
    sketch free-hand lines with the left mouse button.

    DoDrawing() is the pure-Python version of the scene; DoDrawing2() draws
    the same scene but with its first few calls translated to inline C++.
    """

    def __init__(self, parent, id=-1, size=wxDefaultSize):
        wxScrolledWindow.__init__(self, parent, id, wxPoint(0, 0), size, wxSUNKEN_BORDER)

        # Completed free-hand strokes; each stroke is a list of line segments.
        self.lines = []
        # Virtual (scrollable) canvas size in pixels.
        self.maxWidth = 1000
        self.maxHeight = 1000

        self.SetBackgroundColour(wxNamedColor("WHITE"))
        EVT_LEFT_DOWN(self, self.OnLeftButtonEvent)
        EVT_LEFT_UP(self, self.OnLeftButtonEvent)
        EVT_MOTION(self, self.OnLeftButtonEvent)
        EVT_PAINT(self, self.OnPaint)
        self.SetCursor(wxStockCursor(wxCURSOR_PENCIL))
        # bmp = images.getTest2Bitmap()
        # mask = wxMaskColour(bmp, wxBLUE)
        # bmp.SetMask(mask)
        # self.bmp = bmp

        # 20-pixel scroll units over the virtual canvas.
        self.SetScrollbars(20, 20, self.maxWidth/20, self.maxHeight/20)

    def getWidth(self):
        # Virtual canvas width in pixels.
        return self.maxWidth

    def getHeight(self):
        # Virtual canvas height in pixels.
        return self.maxHeight

    def OnPaint(self, event):
        # Repaint using the C++-accelerated drawing routine.
        dc = wxPaintDC(self)
        self.PrepareDC(dc)
        self.DoDrawing2(dc)

    def DoDrawing(self, dc):
        """Draw the full demo scene in pure Python (reference version)."""
        dc.BeginDrawing()
        dc.SetPen(wxPen(wxNamedColour('RED')))
        dc.DrawRectangle(5, 5, 50, 50)

        dc.SetBrush(wxLIGHT_GREY_BRUSH)
        dc.SetPen(wxPen(wxNamedColour('BLUE'), 4))
        dc.DrawRectangle(15, 15, 50, 50)

        dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL))
        dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF))
        te = dc.GetTextExtent("Hello World")
        dc.DrawText("Hello World", 60, 65)

        dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4))
        dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1])

        lst = [(100,110), (150,110), (150,160), (100,160)]
        dc.DrawLines(lst, -60)
        dc.SetPen(wxGREY_PEN)
        dc.DrawPolygon(lst, 75)
        dc.SetPen(wxGREEN_PEN)
        dc.DrawSpline(lst+[(100,100)])

        # dc.DrawBitmap(self.bmp, 200, 20, true)
        # dc.SetTextForeground(wxColour(0, 0xFF, 0x80))
        # dc.DrawText("a bitmap", 200, 85)

        font = wxFont(20, wxSWISS, wxNORMAL, wxNORMAL)
        dc.SetFont(font)
        dc.SetTextForeground(wxBLACK)
        for a in range(0, 360, 45):
            dc.DrawRotatedText("Rotated text...", 300, 300, a)

        dc.SetPen(wxTRANSPARENT_PEN)
        dc.SetBrush(wxBLUE_BRUSH)
        dc.DrawRectangle(50,500,50,50)
        dc.DrawRectangle(100,500,50,50)

        dc.SetPen(wxPen(wxNamedColour('RED')))
        dc.DrawEllipticArc(200, 500, 50, 75, 0, 90)

        self.DrawSavedLines(dc)
        dc.EndDrawing()

    def DoDrawing2(self, dc):
        """Same scene as DoDrawing(), but the first ~6 calls run as inline C++
        compiled by weave's inline_tools."""
        red = wxNamedColour("RED")
        blue = wxNamedColour("BLUE")
        grey_brush = wxLIGHT_GREY_BRUSH
        code = \
"""
//#line 108 "wx_example.py"
dc->BeginDrawing();
dc->SetPen(wxPen(*red,4,wxSOLID));
dc->DrawRectangle(5, 5, 50, 50);
dc->SetBrush(*grey_brush);
dc->SetPen(wxPen(*blue, 4,wxSOLID));
dc->DrawRectangle(15, 15, 50, 50);
"""
        inline_tools.inline(code,['dc','red','blue','grey_brush'],verbose=2)

        # Remainder of the scene is identical to DoDrawing().
        dc.SetFont(wxFont(14, wxSWISS, wxNORMAL, wxNORMAL))
        dc.SetTextForeground(wxColour(0xFF, 0x20, 0xFF))
        te = dc.GetTextExtent("Hello World")
        dc.DrawText("Hello World", 60, 65)

        dc.SetPen(wxPen(wxNamedColour('VIOLET'), 4))
        dc.DrawLine(5, 65+te[1], 60+te[0], 65+te[1])

        lst = [(100,110), (150,110), (150,160), (100,160)]
        dc.DrawLines(lst, -60)
        dc.SetPen(wxGREY_PEN)
        dc.DrawPolygon(lst, 75)
        dc.SetPen(wxGREEN_PEN)
        dc.DrawSpline(lst+[(100,100)])

        # dc.DrawBitmap(self.bmp, 200, 20, true)
        # dc.SetTextForeground(wxColour(0, 0xFF, 0x80))
        # dc.DrawText("a bitmap", 200, 85)

        font = wxFont(20, wxSWISS, wxNORMAL, wxNORMAL)
        dc.SetFont(font)
        dc.SetTextForeground(wxBLACK)
        for a in range(0, 360, 45):
            dc.DrawRotatedText("Rotated text...", 300, 300, a)

        dc.SetPen(wxTRANSPARENT_PEN)
        dc.SetBrush(wxBLUE_BRUSH)
        dc.DrawRectangle(50,500,50,50)
        dc.DrawRectangle(100,500,50,50)

        dc.SetPen(wxPen(wxNamedColour('RED')))
        dc.DrawEllipticArc(200, 500, 50, 75, 0, 90)

        self.DrawSavedLines(dc)
        dc.EndDrawing()

    def DrawSavedLines(self, dc):
        # Replay all completed free-hand strokes.
        dc.SetPen(wxPen(wxNamedColour('MEDIUM FOREST GREEN'), 4))
        for line in self.lines:
            for coords in line:
                apply(dc.DrawLine, coords)

    def SetXY(self, event):
        # Remember the last pen position in virtual-canvas coordinates.
        self.x, self.y = self.ConvertEventCoords(event)

    def ConvertEventCoords(self, event):
        # Translate window coordinates into virtual (scrolled) coordinates.
        xView, yView = self.GetViewStart()
        xDelta, yDelta = self.GetScrollPixelsPerUnit()
        return (event.GetX() + (xView * xDelta),
                event.GetY() + (yView * yDelta))

    def OnLeftButtonEvent(self, event):
        # Free-hand sketching: press starts a stroke, drag extends it,
        # release stores it in self.lines.
        if event.LeftDown():
            self.SetXY(event)
            self.curLine = []
            self.CaptureMouse()
        elif event.Dragging():
            dc = wxClientDC(self)
            self.PrepareDC(dc)
            dc.BeginDrawing()
            dc.SetPen(wxPen(wxNamedColour('MEDIUM FOREST GREEN'), 4))
            coords = (self.x, self.y) + self.ConvertEventCoords(event)
            self.curLine.append(coords)
            apply(dc.DrawLine, coords)
            self.SetXY(event)
            dc.EndDrawing()
        elif event.LeftUp():
            self.lines.append(self.curLine)
            self.curLine = []
            self.ReleaseMouse()
#---------------------------------------------------------------------------
# This example isn't currently used.
class py_canvas(wx.wxWindow):
    """Minimal canvas whose background colour is set via inline C++."""

    def __init__(self, parent, id=-1, pos=wx.wxPyDefaultPosition,
                 size=wx.wxPyDefaultSize, **attr):
        wx.wxWindow.__init__(self, parent, id, pos,size)
        # wx.EVT_PAINT(self,self.on_paint)
        background = wx.wxNamedColour('white')
        # The inline C++ receives 'self' as the wrapped wxWindow pointer.
        code = """
self->SetBackgroundColour(*background);
"""
        inline_tools.inline(code,['self','background'],compiler='msvc')
#----------------------------------------------------------------------------
class MyFrame(wxFrame):
    """Top-level demo frame hosting a single MyCanvas."""

    def __init__(self, parent, ID, title, pos=wxDefaultPosition,
                 size=wxDefaultSize, style=wxDEFAULT_FRAME_STYLE):
        wxFrame.__init__(self, parent, ID, title, pos, size, style)
        # panel = wxPanel(self, -1)
        self.GetSize()
        # button = wxButton(panel, 1003, "Close Me")
        # button.SetPosition(wxPoint(15, 15))
        # EVT_BUTTON(self, 1003, self.OnCloseMe)
        # EVT_CLOSE(self, self.OnCloseWindow)
        # canvas = py_canvas(self,-1)
        canvas = MyCanvas(self,-1)
        canvas.Show(true)
class MyApp(wxApp):
    """wx application bootstrap: creates and shows the demo frame."""

    def OnInit(self):
        win = MyFrame(NULL, -1, "This is a wxFrame", size=(350, 200),
                      style = wxDEFAULT_FRAME_STYLE) # | wxFRAME_TOOL_WINDOW )
        win.Show(true)
        return true
if __name__ == "__main__":
app = MyApp(0)
app.MainLoop()
| bsd-3-clause |
luoyetx/mxnet | example/deep-embedded-clustering/autoencoder.py | 18 | 10317 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=missing-docstring, arguments-differ
from __future__ import print_function
import logging
import mxnet as mx
import numpy as np
import model
from solver import Solver, Monitor
class AutoEncoderModel(model.MXModel):
    """Stacked autoencoder built from symmetric encoder/decoder
    FullyConnected layers, with optional dropout and a KL sparseness
    penalty on sigmoid activations.

    Supports greedy layer-wise pre-training (one stack at a time) followed
    by end-to-end fine-tuning of the full encoder/decoder.
    """

    def setup(self, dims, sparseness_penalty=None, pt_dropout=None,
              ft_dropout=None, input_act=None, internal_act='relu', output_act=None):
        # dims[0] is the input dimension, dims[1:] are the layer sizes;
        # self.N is the number of encoder/decoder layer pairs.
        self.N = len(dims) - 1
        self.dims = dims
        self.stacks = []
        self.pt_dropout = pt_dropout
        self.ft_dropout = ft_dropout
        self.input_act = input_act
        self.internal_act = internal_act
        self.output_act = output_act

        self.data = mx.symbol.Variable('data')
        # Build one pre-training stack per layer; the first/last layers use
        # the input/output activations, inner layers the internal activation.
        for i in range(self.N):
            if i == 0:
                decoder_act = input_act
                idropout = None
            else:
                decoder_act = internal_act
                idropout = pt_dropout
            if i == self.N-1:
                encoder_act = output_act
                odropout = None
            else:
                encoder_act = internal_act
                odropout = pt_dropout
            istack, iargs, iargs_grad, iargs_mult, iauxs = self.make_stack(
                i, self.data, dims[i], dims[i+1], sparseness_penalty,
                idropout, odropout, encoder_act, decoder_act
            )
            self.stacks.append(istack)
            self.args.update(iargs)
            self.args_grad.update(iargs_grad)
            self.args_mult.update(iargs_mult)
            self.auxs.update(iauxs)
        # Full encoder/decoder share the per-stack weights by name.
        self.encoder, self.internals = self.make_encoder(
            self.data, dims, sparseness_penalty, ft_dropout, internal_act, output_act)
        self.decoder = self.make_decoder(
            self.encoder, dims, sparseness_penalty, ft_dropout, internal_act, input_act)
        # Softmax reconstructions carry their own loss; otherwise use L2.
        if input_act == 'softmax':
            self.loss = self.decoder
        else:
            self.loss = mx.symbol.LinearRegressionOutput(data=self.decoder, label=self.data)

    def make_stack(self, istack, data, num_input, num_hidden, sparseness_penalty=None,
                   idropout=None, odropout=None, encoder_act='relu', decoder_act='relu'):
        """Build one encoder+decoder pre-training stack and its parameter
        arrays (args, gradients, lr multipliers, aux states)."""
        x = data
        if idropout:
            x = mx.symbol.Dropout(data=x, p=idropout)
        x = mx.symbol.FullyConnected(name='encoder_%d'%istack, data=x, num_hidden=num_hidden)
        if encoder_act:
            x = mx.symbol.Activation(data=x, act_type=encoder_act)
            if encoder_act == 'sigmoid' and sparseness_penalty:
                # KL sparseness regularizer tracked via a moving average.
                x = mx.symbol.IdentityAttachKLSparseReg(
                    data=x, name='sparse_encoder_%d' % istack, penalty=sparseness_penalty)
        if odropout:
            x = mx.symbol.Dropout(data=x, p=odropout)
        x = mx.symbol.FullyConnected(name='decoder_%d'%istack, data=x, num_hidden=num_input)
        if decoder_act == 'softmax':
            x = mx.symbol.Softmax(data=x, label=data, prob_label=True, act_type=decoder_act)
        elif decoder_act:
            x = mx.symbol.Activation(data=x, act_type=decoder_act)
            if decoder_act == 'sigmoid' and sparseness_penalty:
                x = mx.symbol.IdentityAttachKLSparseReg(
                    data=x, name='sparse_decoder_%d' % istack, penalty=sparseness_penalty)
            x = mx.symbol.LinearRegressionOutput(data=x, label=data)
        else:
            x = mx.symbol.LinearRegressionOutput(data=x, label=data)

        args = {'encoder_%d_weight'%istack: mx.nd.empty((num_hidden, num_input), self.xpu),
                'encoder_%d_bias'%istack: mx.nd.empty((num_hidden,), self.xpu),
                'decoder_%d_weight'%istack: mx.nd.empty((num_input, num_hidden), self.xpu),
                'decoder_%d_bias'%istack: mx.nd.empty((num_input,), self.xpu),}
        args_grad = {'encoder_%d_weight'%istack: mx.nd.empty((num_hidden, num_input), self.xpu),
                     'encoder_%d_bias'%istack: mx.nd.empty((num_hidden,), self.xpu),
                     'decoder_%d_weight'%istack: mx.nd.empty((num_input, num_hidden), self.xpu),
                     'decoder_%d_bias'%istack: mx.nd.empty((num_input,), self.xpu),}
        # Biases learn at twice the weight learning rate.
        args_mult = {'encoder_%d_weight'%istack: 1.0,
                     'encoder_%d_bias'%istack: 2.0,
                     'decoder_%d_weight'%istack: 1.0,
                     'decoder_%d_bias'%istack: 2.0,}
        auxs = {}
        if encoder_act == 'sigmoid' and sparseness_penalty:
            auxs['sparse_encoder_%d_moving_avg' % istack] = mx.nd.ones(num_hidden, self.xpu) * 0.5
        if decoder_act == 'sigmoid' and sparseness_penalty:
            auxs['sparse_decoder_%d_moving_avg' % istack] = mx.nd.ones(num_input, self.xpu) * 0.5
        init = mx.initializer.Uniform(0.07)
        for k, v in args.items():
            init(mx.initializer.InitDesc(k), v)

        return x, args, args_grad, args_mult, auxs

    def make_encoder(self, data, dims, sparseness_penalty=None, dropout=None, internal_act='relu',
                     output_act=None):
        """Chain all encoder layers; returns the top symbol and the list of
        per-layer output symbols (used for feature extraction)."""
        x = data
        internals = []
        N = len(dims) - 1
        for i in range(N):
            x = mx.symbol.FullyConnected(name='encoder_%d'%i, data=x, num_hidden=dims[i+1])
            if internal_act and i < N-1:
                x = mx.symbol.Activation(data=x, act_type=internal_act)
                if internal_act == 'sigmoid' and sparseness_penalty:
                    x = mx.symbol.IdentityAttachKLSparseReg(
                        data=x, name='sparse_encoder_%d' % i, penalty=sparseness_penalty)
            elif output_act and i == N-1:
                x = mx.symbol.Activation(data=x, act_type=output_act)
                if output_act == 'sigmoid' and sparseness_penalty:
                    x = mx.symbol.IdentityAttachKLSparseReg(
                        data=x, name='sparse_encoder_%d' % i, penalty=sparseness_penalty)
            if dropout:
                x = mx.symbol.Dropout(data=x, p=dropout)
            internals.append(x)
        return x, internals

    def make_decoder(self, feature, dims, sparseness_penalty=None, dropout=None,
                     internal_act='relu', input_act=None):
        """Chain all decoder layers (mirror of the encoder) on top of the
        encoded feature symbol."""
        x = feature
        N = len(dims) - 1
        for i in reversed(range(N)):
            x = mx.symbol.FullyConnected(name='decoder_%d'%i, data=x, num_hidden=dims[i])
            if internal_act and i > 0:
                x = mx.symbol.Activation(data=x, act_type=internal_act)
                if internal_act == 'sigmoid' and sparseness_penalty:
                    x = mx.symbol.IdentityAttachKLSparseReg(
                        data=x, name='sparse_decoder_%d' % i, penalty=sparseness_penalty)
            elif input_act and i == 0:
                x = mx.symbol.Activation(data=x, act_type=input_act)
                if input_act == 'sigmoid' and sparseness_penalty:
                    x = mx.symbol.IdentityAttachKLSparseReg(
                        data=x, name='sparse_decoder_%d' % i, penalty=sparseness_penalty)
            if dropout and i > 0:
                x = mx.symbol.Dropout(data=x, p=dropout)
        return x

    def layerwise_pretrain(self, X, batch_size, n_iter, optimizer, l_rate, decay,
                           lr_scheduler=None, print_every=1000):
        """Greedy pre-training: train each stack on the features produced by
        the already-trained layers below it."""
        def l2_norm(label, pred):
            return np.mean(np.square(label-pred))/2.0
        solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                        lr_scheduler=lr_scheduler)
        solver.set_metric(mx.metric.CustomMetric(l2_norm))
        solver.set_monitor(Monitor(print_every))
        data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                      last_batch_handle='roll_over')
        for i in range(self.N):
            if i == 0:
                data_iter_i = data_iter
            else:
                # Feed layer i with features extracted from layer i-1.
                X_i = list(model.extract_feature(
                    self.internals[i-1], self.args, self.auxs, data_iter, X.shape[0],
                    self.xpu).values())[0]
                data_iter_i = mx.io.NDArrayIter({'data': X_i}, batch_size=batch_size,
                                                last_batch_handle='roll_over')
            logging.info('Pre-training layer %d...', i)
            solver.solve(self.xpu, self.stacks[i], self.args, self.args_grad, self.auxs,
                         data_iter_i, 0, n_iter, {}, False)

    def finetune(self, X, batch_size, n_iter, optimizer, l_rate, decay, lr_scheduler=None,
                 print_every=1000):
        """End-to-end training of the full encoder/decoder on X."""
        def l2_norm(label, pred):
            return np.mean(np.square(label-pred))/2.0
        solver = Solver(optimizer, momentum=0.9, wd=decay, learning_rate=l_rate,
                        lr_scheduler=lr_scheduler)
        solver.set_metric(mx.metric.CustomMetric(l2_norm))
        solver.set_monitor(Monitor(print_every))
        data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=True,
                                      last_batch_handle='roll_over')
        logging.info('Fine tuning...')
        solver.solve(self.xpu, self.loss, self.args, self.args_grad, self.auxs, data_iter,
                     0, n_iter, {}, False)

    def eval(self, X):
        """Return the mean reconstruction error (L2/2) of the model on X."""
        batch_size = 100
        data_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
                                      last_batch_handle='pad')
        Y = list(model.extract_feature(
            self.loss, self.args, self.auxs, data_iter, X.shape[0], self.xpu).values())[0]
        return np.mean(np.square(Y-X))/2.0
algorhythms/LeetCode | 732 My Calendar III.py | 1 | 2130 | #!/usr/bin/python3
"""
Implement a MyCalendarThree class to store your events. A new event can always
be added.
Your class will have one method, book(int start, int end). Formally, this
represents a booking on the half open interval [start, end), the range of real
numbers x such that start <= x < end.
A K-booking happens when K events have some non-empty intersection (ie., there
is some time that is common to all K events.)
For each call to the method MyCalendar.book, return an integer K representing
the largest integer such that there exists a K-booking in the calendar.
Your class will be called like this: MyCalendarThree cal = new
MyCalendarThree(); MyCalendarThree.book(start, end)
Example 1:
MyCalendarThree();
MyCalendarThree.book(10, 20); // returns 1
MyCalendarThree.book(50, 60); // returns 1
MyCalendarThree.book(10, 40); // returns 2
MyCalendarThree.book(5, 15); // returns 3
MyCalendarThree.book(5, 10); // returns 3
MyCalendarThree.book(25, 55); // returns 3
Explanation:
The first two events can be booked and are disjoint, so the maximum K-booking
is a 1-booking.
The third event [10, 40) intersects the first event, and the maximum K-booking
is a 2-booking.
The remaining events cause the maximum K-booking to be only a 3-booking.
Note that the last event locally causes a 2-booking, but the answer is still 3
because
eg. [10, 20), [10, 40), and [5, 15) are still triple booked.
Note:
The number of calls to MyCalendarThree.book per test case will be at most 400.
In calls to MyCalendarThree.book(start, end), start and end are integers in the
range [0, 10^9].
"""
import bisect
class MyCalendarThree:
    """Track bookings over half-open intervals [start, end) and report, after
    each booking, the largest number of simultaneously overlapping events."""

    def __init__(self):
        # Sweep-line delta map: time -> net change in concurrent bookings
        # (+1 per interval start, -1 per interval end at that instant).
        self._delta = {}

    def book(self, start: int, end: int) -> int:
        """Record [start, end) and return the maximum K-booking so far.

        Because start and end deltas at the same coordinate cancel, touching
        intervals such as [a, t) and [t, b) never count as overlapping.
        """
        self._delta[start] = self._delta.get(start, 0) + 1
        self._delta[end] = self._delta.get(end, 0) - 1

        best = active = 0
        for t in sorted(self._delta):
            active += self._delta[t]
            if active > best:
                best = active
        return best
# Your MyCalendarThree object will be instantiated and called as such:
# obj = MyCalendarThree()
# param_1 = obj.book(start,end)
| mit |
boldprogressives/akcode | settings.py | 1 | 4549 |
# NOTE(review): DEBUG must be False in production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Your Name', 'your_email@example.com'),
)

MANAGERS = ADMINS

TIME_ZONE = 'America/Chicago'

LANGUAGE_CODE = 'en-us'

# Hosts allowed to see debug-toolbar output.
INTERNAL_IPS = ('127.0.0.1',
)

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# By default urllib, urllib2, and the like have no timeout which can cause
# some apache processes to hang until they are forced kill.
# Before python 2.6, the only way to cause them to time out is by setting
# the default timeout on the global socket
import socket
socket.setdefaulttimeout(5)

import os
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__))

# Filesystem locations for user uploads and collected static assets.
MEDIA_ROOT = os.path.join(PROJECT_PATH, 'media/')
STATIC_ROOT = os.path.join(PROJECT_PATH, 'static/')

MEDIA_URL = '/media/'
STATIC_URL = '/static/'

# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'

# Additional locations of static files
STATICFILES_DIRS = (
    os.path.join(PROJECT_PATH, 'common_static'),
)

GENSHI_TEMPLATE_DIR = os.path.join(PROJECT_PATH, "templates", "genshi")

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
    'django.middleware.transaction.TransactionMiddleware',
)

DEBUG_TOOLBAR_CONFIG = {
    'INTERCEPT_REDIRECTS': False,
}

ROOT_URLCONF = 'urls'

TEMPLATE_DIRS = (
    os.path.join(PROJECT_PATH, "templates"),
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.admin',
    'debug_toolbar',
    'djangohelpers',
    'actionkit',
    'main',
)

TEMPLATE_CONTEXT_PROCESSORS = (
    "django.contrib.auth.context_processors.auth",
    "django.core.context_processors.debug",
    "django.core.context_processors.i18n",
    "django.core.context_processors.media",
    "django.core.context_processors.static",
    "django.contrib.messages.context_processors.messages",
    "django.core.context_processors.request",
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}

AKCODE_GIT_REPO = None

# import local settings overriding the defaults
# NOTE(review): `except ImportError` also swallows ImportErrors raised
# *inside* local_settings.py, not just a missing module -- verify intended.
try:
    from local_settings import *
except ImportError:
    try:
        from mod_python import apache
        apache.log_error( "local settings not available", apache.APLOG_NOTICE )
    except ImportError:
        import sys
        sys.stderr.write( "local settings not available\n" )
else:
    # Allow local settings to append to the default app/middleware lists.
    try:
        INSTALLED_APPS += LOCAL_INSTALLED_APPS
    except NameError:
        pass
    try:
        MIDDLEWARE_CLASSES += LOCAL_MIDDLEWARE_CLASSES
    except NameError:
        pass
| bsd-3-clause |
ybenitezf/miner | core/pydal/adapters/postgres.py | 3 | 18603 | # -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from ..drivers import psycopg2_adapt
from .._compat import PY2
from ..helpers.methods import varquote_aux
from .base import BaseAdapter
from ..objects import Expression
class PostgreSQLAdapter(BaseAdapter):
drivers = ('psycopg2','pg8000')
QUOTE_TEMPLATE = '"%s"'
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'SERIAL PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s %(null)s %(unique)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'GEOMETRY',
'geography': 'GEOGRAPHY',
'big-id': 'BIGSERIAL PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s %(null)s %(unique)s',
'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
    def varquote(self, name):
        # Double-quote the identifier only when it actually needs quoting.
        return varquote_aux(name, '"%s"')
    def adapt(self, obj):
        """Quote/escape a Python string for safe inclusion in SQL text,
        using whatever facility the active driver provides."""
        if self.driver_name == 'psycopg2':
            adapted = psycopg2_adapt(obj)
            # prepare() binds the connection so encoding is applied correctly.
            adapted.prepare(self.connection)
            rv = adapted.getquoted()
            if not PY2:
                # psycopg2 returns bytes on Python 3.
                if isinstance(rv, bytes):
                    return rv.decode('utf-8')
            return rv
        elif self.driver_name == 'pg8000':
            # pg8000 has no adapt helper: escape quotes and % placeholders.
            return "'%s'" % obj.replace("%", "%%").replace("'", "''")
        else:
            return "'%s'" % obj.replace("'", "''")
    def sequence_name(self, table):
        # Default PostgreSQL serial sequence name: "<table>_id_seq".
        return self.QUOTE_TEMPLATE % (table + '_id_seq')
    def RANDOM(self):
        # SQL expression for a per-row random value.
        return 'RANDOM()'
    def ADD(self, first, second):
        """'+' operator: string concatenation (||) for text-like field
        types, numeric addition for everything else."""
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))
    def distributed_transaction_begin(self, key):
        # Intentional no-op: the two-phase-commit work happens in
        # prepare()/commit_prepared()/rollback_prepared() below.
        return
    def prepare(self,key):
        # First phase of two-phase commit: stage the transaction under `key`.
        self.execute("PREPARE TRANSACTION '%s';" % key)
    def commit_prepared(self,key):
        # Second phase of two-phase commit: commit the staged transaction.
        self.execute("COMMIT PREPARED '%s';" % key)
    def rollback_prepared(self,key):
        # Abort a transaction previously staged with prepare().
        self.execute("ROLLBACK PREPARED '%s';" % key)
    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE query; SERIAL columns create their own
        sequence, so no explicit sequence/trigger setup is needed."""
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        # % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)
    # Parses postgres URIs: user:password@host[:port]/db[?sslmode=...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self, db,uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the connection URI into driver_args and (optionally)
        open the connection.

        Raises SyntaxError when the URI is malformed or missing a
        required component (user, host, database name).
        """
        self.db = db
        self.dbengine="postgres"
        self.uri = uri
        if do_connect:
            self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        self._last_insert = None # for INSERT ... RETURNING ID
        self.TRUE_exp = 'TRUE'
        self.FALSE_exp = 'FALSE'
        # Strip the "postgres://" scheme prefix before matching.
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        driver_args['database'] = db
        driver_args['user'] = user
        driver_args['host'] = host
        driver_args['port'] = int(port)
        driver_args['password'] = password
        if sslmode:
            driver_args['sslmode'] = sslmode
        # choose diver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        # Closure captures driver_args so pooled reconnects reuse them.
        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector=connector
        if do_connect:
            self.reconnect()
    def after_connection(self):
        """Per-connection session setup: UTF-8 client encoding, standard
        string literal semantics, and JSON type detection."""
        #self.connection.set_client_encoding('UTF8') #pg8000 doesn't have a native set_client_encoding
        self.execute("SET CLIENT_ENCODING TO 'UTF8'")
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()
def _insert(self, table, fields):
table_rname = table.sqlsafe
if fields:
keys = ','.join(f.sqlsafe_name for f, v in fields)
values = ','.join(self.expand(v, f.type) for f, v in fields)
if hasattr(table, '_id'):
self._last_insert = (table._id, 1)
return 'INSERT INTO %s(%s) VALUES (%s) RETURNING %s;' % (
table_rname, keys, values, self.QUOTE_TEMPLATE % table._id.name)
else:
self._last_insert = None
return 'INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values)
else:
self._last_insert
return self._insert_empty(table)
def lastrowid(self, table=None):
    """Return the id of the most recently inserted row.

    If the preceding INSERT used a RETURNING clause (``_last_insert``
    set), the id is already waiting on the cursor; otherwise ask the
    server for ``lastval()`` explicitly.
    """
    if not self._last_insert:
        self.execute("select lastval()")
    return int(self.cursor.fetchone()[0])
def try_json(self):
    # Detect server-side JSON column support (PostgreSQL >= 9.2).  Each
    # driver exposes the server version differently, hence the sniffing.
    # NOTE(review): the comparisons against version *strings* are
    # lexicographic (e.g. "2.10.0" < "2.5.0"), which can misclassify
    # newer drivers/servers -- confirm against the driver docs.
    if self.driver_name == "pg8000":
        supports_json = self.connection._server_version >= "9.2.0"
    elif (self.driver_name == "psycopg2" and
          self.driver.__version__ >= "2.0.12"):
        supports_json = self.connection.server_version >= 90200
    elif self.driver_name == "zxJDBC":
        supports_json = self.connection.dbversion >= "9.2.0"
    else:
        supports_json = None
    if supports_json:
        self.types["json"] = "JSON"
        # Drivers new enough to decode json columns natively.
        if ((self.driver_name == "psycopg2" and
             self.driver.__version__ >= '2.5.0') or
            (self.driver_name == "pg8000" and
             self.driver.__version__ >= '1.10.2')):
            self.driver_auto_json = ['loads']
    else:
        self.db.logger.debug("Your database version does not support the JSON"
                             " data type (using TEXT instead)")
def LIKE(self, first, second, escape=None):
    """Case sensitive like operator"""
    if isinstance(second, Expression):
        second = self.expand(second, 'string')
    else:
        second = self.expand(second, 'string')
        if escape is None:
            # Default the escape character and double any occurrence in
            # the pattern so literal backslashes match literally.
            escape = '\\'
            second = second.replace(escape, escape * 2)
    # NOTE(review): when *second* is an Expression and *escape* is None,
    # escape stays None and the SQL ends with ESCAPE 'None' -- confirm
    # callers always pass an escape character in that case.
    if first.type not in ('string', 'text', 'json'):
        # Non-text columns must be cast to CHAR before LIKE applies.
        return "(%s LIKE %s ESCAPE '%s')" % (
            self.CAST(self.expand(first), 'CHAR(%s)' % first.length),
            second, escape
        )
    else:
        return "(%s LIKE %s ESCAPE '%s')" % (self.expand(first), second, escape)
def ILIKE(self, first, second, escape=None):
    """Case insensitive like operator (PostgreSQL ILIKE)."""
    if isinstance(second, Expression):
        second = self.expand(second, 'string')
    else:
        second = self.expand(second, 'string')
        if escape is None:
            # Default the escape character and double any occurrence in
            # the pattern so literal backslashes match literally.
            escape = '\\'
            second = second.replace(escape, escape * 2)
    if first.type not in ('string', 'text', 'json', 'list:string'):
        # Non-text columns must be cast to CHAR before ILIKE applies.
        return "(%s ILIKE %s ESCAPE '%s')" % (
            self.CAST(self.expand(first), 'CHAR(%s)' % first.length),
            second, escape
        )
    else:
        return "(%s ILIKE %s ESCAPE '%s')" % (self.expand(first), second, escape)
def REGEXP(self, first, second):
    """PostgreSQL case-sensitive regular-expression match (``~``)."""
    lhs = self.expand(first)
    rhs = self.expand(second, 'string')
    return '(%s ~ %s)' % (lhs, rhs)
# GIS functions
def ST_ASGEOJSON(self, first, second):
    """
    http://postgis.org/docs/ST_AsGeoJSON.html
    """
    # Note the argument order: version comes before the geometry.
    parts = (second['version'], self.expand(first),
             second['precision'], second['options'])
    return 'ST_AsGeoJSON(%s,%s,%s,%s)' % parts
def ST_ASTEXT(self, first):
    """
    http://postgis.org/docs/ST_AsText.html
    """
    geom_sql = self.expand(first)
    return 'ST_AsText(%s)' % geom_sql
def ST_X(self, first):
    """
    http://postgis.org/docs/ST_X.html
    """
    point_sql = self.expand(first)
    return 'ST_X(%s)' % point_sql
def ST_Y(self, first):
    """
    http://postgis.org/docs/ST_Y.html
    """
    point_sql = self.expand(first)
    return 'ST_Y(%s)' % point_sql
def ST_CONTAINS(self, first, second):
    """
    http://postgis.org/docs/ST_Contains.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Contains(%s,%s)' % (lhs, rhs)
def ST_DISTANCE(self, first, second):
    """
    http://postgis.org/docs/ST_Distance.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Distance(%s,%s)' % (lhs, rhs)
def ST_EQUALS(self, first, second):
    """
    http://postgis.org/docs/ST_Equals.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Equals(%s,%s)' % (lhs, rhs)
def ST_INTERSECTS(self, first, second):
    """
    http://postgis.org/docs/ST_Intersects.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Intersects(%s,%s)' % (lhs, rhs)
def ST_OVERLAPS(self, first, second):
    """
    http://postgis.org/docs/ST_Overlaps.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Overlaps(%s,%s)' % (lhs, rhs)
def ST_SIMPLIFY(self, first, second):
    """
    http://postgis.org/docs/ST_Simplify.html
    """
    geom = self.expand(first)
    tolerance = self.expand(second, 'double')
    return 'ST_Simplify(%s,%s)' % (geom, tolerance)
def ST_SIMPLIFYPRESERVETOPOLOGY(self, first, second):
    """
    http://postgis.org/docs/ST_SimplifyPreserveTopology.html
    """
    geom = self.expand(first)
    tolerance = self.expand(second, 'double')
    return 'ST_SimplifyPreserveTopology(%s,%s)' % (geom, tolerance)
def ST_TOUCHES(self, first, second):
    """
    http://postgis.org/docs/ST_Touches.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Touches(%s,%s)' % (lhs, rhs)
def ST_WITHIN(self, first, second):
    """
    http://postgis.org/docs/ST_Within.html
    """
    lhs = self.expand(first)
    rhs = self.expand(second, first.type)
    return 'ST_Within(%s,%s)' % (lhs, rhs)
def ST_DWITHIN(self, first, tup):
    """
    http://postgis.org/docs/ST_DWithin.html

    *tup* is a (geometry, distance) pair.
    """
    other, distance = tup
    geom = self.expand(first)
    return 'ST_DWithin(%s,%s,%s)' % (geom,
                                     self.expand(other, first.type),
                                     self.expand(distance, 'double'))
def represent(self, obj, fieldtype):
    # SQL-literal representation; adds PostGIS geometry/geography
    # handling on top of the base adapter.
    field_is_type = fieldtype.startswith
    if field_is_type('geo'):
        srid = 4326  # postGIS default srid for geometry
        # fieldtype looks like "geometry(schema,srid)"; strip the
        # trailing ')' and split out the parameters.
        geotype, parms = fieldtype[:-1].split('(')
        parms = parms.split(',')
        if len(parms) >= 2:
            schema, srid = parms[:2]
        if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" % (obj, srid)
        elif field_is_type('geography'):
            value = "ST_GeogFromText('SRID=%s;%s')" % (srid, obj)
        # else:
        #     raise SyntaxError('Invalid field type %s' % fieldtype)
        # NOTE(review): a 'geo*' type that is neither geometry nor
        # geography leaves ``value`` unbound (NameError); the commented
        # raise above suggests this was meant to be a SyntaxError.
        return value
    return BaseAdapter.represent(self, obj, fieldtype)
def _drop(self, table, mode='restrict'):
if mode not in ['restrict', 'cascade', '']:
raise ValueError('Invalid mode: %s' % mode)
return ['DROP TABLE ' + table.sqlsafe + ' ' + str(mode) + ';']
def execute(self, *a, **b):
    # pg8000 under Python 2 requires unicode SQL text, so decode the
    # (byte) query string before delegating to the base adapter.
    if PY2 and self.driver_name == "pg8000":
        a = list(a)
        a[0] = a[0].decode('utf8')
    return BaseAdapter.execute(self, *a, **b)
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter variant that stores ``list:*`` field types in
    native PostgreSQL array columns (BIGINT[]/TEXT[]) instead of the
    serialized-text representation used by the base adapter."""
    drivers = ('psycopg2', 'pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s %(null)s %(unique)s',
        # list:* types are native arrays here (the point of this adapter).
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s %(null)s %(unique)s',
        'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
    }

    def parse_list_integers(self, value, field_type):
        # Array columns already come back from the driver as Python lists.
        return value

    def parse_list_references(self, value, field_type):
        # field_type is 'list:reference <table>'; strip the 'list:' prefix
        # so each element is parsed as a plain reference.
        return [self.parse_reference(r, field_type[5:]) for r in value]

    def parse_list_strings(self, value, field_type):
        return value

    def represent(self, obj, fieldtype):
        field_is_type = fieldtype.startswith
        if field_is_type('list:'):
            # Normalize to a list, then emit a SQL ARRAY[...] literal.
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str, obj)
            else:
                obj = map(int, obj)
            # NOTE(review): repr() of a Python str happens to look like a
            # SQL string literal for simple values, but quotes are not
            # escaped here -- confirm inputs are trusted/escaped upstream.
            return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
        return PostgreSQLAdapter.represent(self, obj, fieldtype)

    def CONTAINS(self, first, second, case_sensitive=True):
        # Membership in an array column maps onto '<value> = ANY(col)'
        # (or ILIKE against ANY(col) for the case-insensitive variant).
        if first.type.startswith('list'):
            f = self.expand(second, 'string')
            s = self.ANY(first)
            if case_sensitive is True:
                return self.EQ(f, s)
            else:
                return self.ILIKE(f, s, escape='\\')
        else:
            return PostgreSQLAdapter.CONTAINS(self, first, second, case_sensitive=case_sensitive)

    def ANY(self, first):
        return "ANY(%s)" % self.expand(first)

    def ILIKE(self, first, second, escape=None):
        # NOTE(review): ``'type' not in first`` appears to distinguish the
        # raw SQL strings produced by CONTAINS/ANY above (plain str, which
        # supports substring ``in``) from Field/Expression objects --
        # confirm this is the intent.
        if first and 'type' not in first:
            args = (first, self.expand(second))
            ilike = '(%s ILIKE %s)' % args
        else:
            ilike = PostgreSQLAdapter.ILIKE(self, first, second, escape=escape)
        return ilike

    def EQ(self, first, second=None):
        # Same raw-SQL-string special case as ILIKE above.
        if first and 'type' not in first:
            eq = '(%s = %s)' % (first, self.expand(second))
        else:
            eq = PostgreSQLAdapter.EQ(self, first, second)
        return eq
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter for Jython, connecting over JDBC via zxJDBC."""
    drivers = ('zxJDBC',)

    # user:password@host:port/dbname (no sslmode group, unlike the parent).
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>\[[^/]+\]|[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # NOTE(review): mutable default args (driver_args/adapter_args)
        # are shared across calls -- safe only if never mutated.
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Parse the portion after 'scheme://'.
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # zxJDBC connects with (jdbc-url, user, password).
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg, driver_args=driver_args):
            return self.driver.connect(*msg, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # JDBC driver does support set_client_encoding, unlike pg8000.
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
| gpl-2.0 |
pydanny/shortener_project | shortener/links/models.py | 1 | 1732 | from django.core.urlresolvers import reverse, NoReverseMatch
from django.db import models
from django.utils.baseconv import base64
from django.utils.translation import ugettext_lazy as _
from amazonify import amazonify
from model_utils.models import TimeStampedModel
from .validators import validate_five_characters
class Link(TimeStampedModel):
    """A shortened URL: the original target plus the short identifier used
    by the redirect view.  TimeStampedModel supplies created/modified."""
    original_url = models.CharField(_("URL to be shortened"), max_length=255, unique=True)
    identifier = models.CharField(_("Identifier"),
                    max_length=100,
                    blank=True,
                    validators=[validate_five_characters],
                    db_index=True)

    def __unicode__(self):
        return self.original_url

    @property
    def count(self):
        # Number of recorded hits (one LinkLog row per redirect).
        return self.linklog_set.count()

    def log(self, request):
        # Record a hit for this link, capturing request metadata.
        self.linklog_set.create_from_request(
            link=self,
            request=request
        )

    def get_identifier_url(self):
        try:
            return reverse("links:redirect", kwargs={'identifier': self.identifier})
        except NoReverseMatch:
            # NOTE(review): 'NADA' looks like a placeholder sentinel for an
            # unroutable identifier -- confirm callers expect this string.
            return 'NADA'

    @property
    def tiny(self):
        # Base-64 style encoding of the primary key, used as a short token.
        return base64.encode(self.pk)

    def get_tiny_url(self):
        return reverse("links:redirect", kwargs={'identifier': self.tiny})

    def amazonify(self):
        # Rewrite Amazon product URLs to embed the affiliate tag.
        url = self.original_url
        if url.startswith(
            ("https://amazon.",
            "http://amazon.",
            "http://amzn.",
            "https://amzn.",
            "https://www.amazon.",
            "http://www.amazon.")
        ):
            url = amazonify(url, "mlinar-20")
        return url
| mit |
omni5cience/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/request.py | 567 | 5808 | # urllib3/request.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # Methods whose fields belong in the query string rather than the body.
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # BUG FIX: this previously did ``raise NotImplemented(...)``.
        # NotImplemented is a singleton, not an exception class, so calling
        # it raised a confusing TypeError instead of the intended error.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the option
        to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
        payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request signing,
        such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example: ::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will be
        overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(fields or {},
                                                           boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                  'application/x-www-form-urlencoded')

        if headers is None:
            headers = self.headers

        # Content-Type is forced; caller-supplied headers are layered on top.
        headers_ = {'Content-Type': content_type}
        headers_.update(headers)

        return self.urlopen(method, url, body=body, headers=headers_,
                            **urlopen_kw)
| mit |
friedrich420/N4-AEL-KERNEL-LOLLIPOP | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# futex(2) operation codes and flags (mirrors include/uapi/linux/futex.h).
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

# Nanoseconds per second, used by the nsecs_* helpers below.
NSECS_PER_SEC = 1000000000
def avg(total, n):
    """Return the mean of *total* accumulated over *n* samples."""
    mean = total / n
    return mean
def nsecs(secs, nsecs):
    """Combine whole seconds plus a nanosecond remainder into total ns."""
    total = secs * NSECS_PER_SEC
    return total + nsecs
def nsecs_secs(nsecs):
    """Seconds portion of a nanosecond count."""
    whole = nsecs / NSECS_PER_SEC
    return whole
def nsecs_nsecs(nsecs):
    """Sub-second (remainder) portion of a nanosecond count."""
    remainder = nsecs % NSECS_PER_SEC
    return remainder
def nsecs_str(nsecs):
    """Format a nanosecond count as ' sssss.nnnnnnnnn'."""
    # BUG FIX: a trailing comma previously turned the result into a
    # 1-tuple instead of the formatted string (and the local shadowed
    # the builtin ``str``).
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Accumulate (min, max, avg, count) statistics for *key* in *dict*."""
    # FIX: dict.has_key() was removed in Python 3; ``in`` works in both.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        # NOTE(review): this is a pairwise average with the previous
        # average, not a true running mean over all samples.
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Clear the terminal via ANSI escapes: cursor home, then erase display."""
    ansi_home_and_clear = "\x1b[H\x1b[2J"
    print(ansi_home_and_clear)
audit_package_warned = False

# Best-effort import of the audit-libs-python bindings, used to translate
# syscall numbers to names.  Any failure (missing package, unknown machine)
# falls through to a one-time warning.  NOTE: the bare ``print`` statement
# below means this file is Python 2 only.
try:
    import audit
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64' : audit.MACH_IA64,
        'ppc' : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390' : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386' : audit.MACH_X86,
        'i586' : audit.MACH_X86,
        'i686' : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is only present in newer audit bindings.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    # Map a syscall number to its symbolic name, falling back to the bare
    # number.  The bare except is deliberate: ``audit`` and ``machine_id``
    # may be undefined (NameError) when the audit package failed to import
    # above.
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except:
        return str(id)
def strerror(nr):
    """Return the symbolic errno name (e.g. 'ENOENT') for *nr*.

    Accepts positive or negative errno values; unknown values produce a
    descriptive fallback string.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        # Narrowed from a bare ``except:``; only a missing errno is an
        # expected, recoverable condition here.
        return "Unknown %d errno" % nr
| gpl-2.0 |
google-research/dice_rl | google/scripts/run_q_estimator.py | 1 | 4763 | # Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
import numpy as np
import os
import tensorflow.compat.v2 as tf
tf.compat.v1.enable_v2_behavior()
import pickle
from tf_agents.environments import gym_wrapper
from tf_agents.environments import tf_py_environment
from dice_rl.environments.env_policies import get_target_policy
from dice_rl.google.estimators.tabular_qlearning import TabularQLearning
from dice_rl.estimators import estimator as estimator_lib
import dice_rl.utils.common as common_utils
from dice_rl.data.dataset import Dataset, EnvStep, StepType
from dice_rl.data.tf_offpolicy_dataset import TFOffpolicyDataset
import google3.learning.deepmind.xmanager2.client.google as xm # pylint: disable=unused-import
FLAGS = flags.FLAGS

# Experiment configuration: which environment/dataset to load, how the
# dataset was collected (trajectories, length, alpha mix with the target
# policy), and where to save the resulting estimate.
flags.DEFINE_string('env_name', 'taxi', 'Environment name.')
flags.DEFINE_integer('seed', 0, 'Initial random seed.')
flags.DEFINE_integer('num_trajectory', 100,
                     'Number of trajectories to collect.')
flags.DEFINE_integer('max_trajectory_length', 500,
                     'Cutoff trajectory at this step.')
flags.DEFINE_float('alpha', 0.0,
                   'How close to target policy.')
flags.DEFINE_bool('tabular_obs', True,
                  'Whether to use tabular observations.')
#flags.DEFINE_string('load_dir', '/cns/is-d/home/sherryy/teqdice/data',
flags.DEFINE_string('load_dir', '/cns/vz-d/home/brain-ofirnachum',
                    'Directory to load dataset from.')
flags.DEFINE_string('save_dir', None,
                    'Directory to save estimation results.')
flags.DEFINE_float('gamma', 0.995,
                   'Discount factor.')
flags.DEFINE_integer('limit_episodes', None,
                     'Number of episodes to take from dataset.')
def main(argv):
    # Pull all experiment configuration out of absl flags.
    env_name = FLAGS.env_name
    seed = FLAGS.seed
    tabular_obs = FLAGS.tabular_obs
    num_trajectory = FLAGS.num_trajectory
    max_trajectory_length = FLAGS.max_trajectory_length
    alpha = FLAGS.alpha
    load_dir = FLAGS.load_dir
    save_dir = FLAGS.save_dir
    gamma = FLAGS.gamma
    assert 0 <= gamma < 1.
    limit_episodes = FLAGS.limit_episodes

    # Policy whose value we want to estimate (off-policy evaluation target).
    target_policy = get_target_policy(load_dir, env_name, tabular_obs)

    # The dataset directory name encodes the collection hyperparameters.
    hparam_str = ('{ENV_NAME}_tabular{TAB}_alpha{ALPHA}_seed{SEED}_'
                  'numtraj{NUM_TRAJ}_maxtraj{MAX_TRAJ}').format(
                      ENV_NAME=env_name,
                      TAB=tabular_obs,
                      ALPHA=alpha,
                      SEED=seed,
                      NUM_TRAJ=num_trajectory,
                      MAX_TRAJ=max_trajectory_length)
    directory = os.path.join(load_dir, hparam_str)
    print('Loading dataset.')
    dataset = Dataset.load(directory)
    all_steps = dataset.get_all_steps()
    max_reward = tf.reduce_max(all_steps.reward)
    min_reward = tf.reduce_min(all_steps.reward)
    print('num loaded steps', dataset.num_steps)
    print('num loaded total steps', dataset.num_total_steps)
    print('num loaded episodes', dataset.num_episodes)
    print('num loaded total episodes', dataset.num_total_episodes)
    print('min reward', min_reward, 'max reward', max_reward)

    train_hparam_str = ('gamma{GAM}_limit{LIMIT}').format(
        GAM=gamma,
        LIMIT=limit_episodes)

    # Naive baseline: discounted per-step average reward of the dataset.
    estimate = estimator_lib.get_fullbatch_average(dataset, gamma=gamma)
    print('data per step avg', estimate)

    # Tabular Q-learning OPE estimator with an ensemble of reward
    # perturbations for uncertainty.
    estimator = TabularQLearning(dataset.spec, gamma, num_qvalues=200,
                                 perturbation_scale=[0.0, 0.01, 0.02, 0.05, 0.1, 0.2, 0.4, 1.],
                                 default_reward_value=0.0,
                                 limit_episodes=limit_episodes)
    estimate = estimator.solve(dataset, target_policy)
    print('estimated per step avg', estimate)

    if save_dir is not None:
        # Persist the scalar estimate for later aggregation.
        results_dir = os.path.join(save_dir, hparam_str)
        if not tf.io.gfile.exists(results_dir):
            tf.io.gfile.makedirs(results_dir)
        results_filename = os.path.join(results_dir,
                                        'results_%s.npy' % train_hparam_str)
        with tf.io.gfile.GFile(results_filename, 'w') as f:
            np.save(f, estimate)
    print('Done!')
# Standard absl entry point: parses flags, then calls main().
if __name__ == '__main__':
    app.run(main)
| apache-2.0 |
nomedeusuariodesconhecido/info3180-lab4 | venv/lib/python2.7/site-packages/pip/utils/logging.py | 91 | 3186 | from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
# NOTE(review): this attribute is set only on the importing thread;
# ``threading.local`` attributes are per-thread, so other threads will
# not see ``indentation`` unless they set it (or a fallback is used).
_log_state.indentation = 0
@contextlib.contextmanager
def indent_log(num=2):
    """
    A context manager which will cause the log output to be indented for any
    log messages emitted inside it.
    """
    _log_state.indentation += num
    try:
        yield
    finally:
        # BUG FIX: always restore the previous level, even if the body
        # raised; otherwise an exception left all later output indented.
        _log_state.indentation -= num
def get_indentation():
    """Return the current log indentation level (0 when unset).

    The getattr fallback matters because ``threading.local`` attributes
    assigned at import time exist only on the importing thread; any other
    thread would otherwise hit AttributeError.
    """
    return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
    """Formatter that prefixes every output line with the current
    indentation level (see ``indent_log``/``get_indentation``)."""

    def format(self, record):
        """Format via the base class, then indent each resulting line."""
        rendered = logging.Formatter.format(self, record)
        prefix = " " * get_indentation()
        return "".join(prefix + line for line in rendered.splitlines(True))
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
    """StreamHandler that colorizes messages by level when colorama is
    available and the stream supports it."""

    # Don't build up a list of colors if we don't have colorama
    if colorama:
        COLORS = [
            # This needs to be in order from highest logging level to lowest.
            (logging.ERROR, _color_wrap(colorama.Fore.RED)),
            (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
        ]
    else:
        COLORS = []

    def __init__(self, stream=None):
        logging.StreamHandler.__init__(self, stream)

        # On Windows, colorama translates ANSI escapes into Win32 calls.
        if WINDOWS and colorama:
            self.stream = colorama.AnsiToWin32(self.stream)

    def should_color(self):
        # Don't colorize things if we do not have colorama
        if not colorama:
            return False

        # Unwrap AnsiToWin32 so we can inspect the real stream.
        real_stream = (
            self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
            else self.stream.wrapped
        )

        # If the stream is a tty we should color it
        if hasattr(real_stream, "isatty") and real_stream.isatty():
            return True

        # If we have an ANSI term we should color it
        if os.environ.get("TERM") == "ANSI":
            return True

        # If anything else we should not color it
        return False

    def format(self, record):
        msg = logging.StreamHandler.format(self, record)

        if self.should_color():
            # Apply the color of the first (highest) matching level.
            for level, color in self.COLORS:
                if record.levelno >= level:
                    msg = color(msg)
                    break

        return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """RotatingFileHandler that creates the log directory on first open."""

    def _open(self):
        # Ensure the directory exists before the base class opens the file.
        log_dir = os.path.dirname(self.baseFilename)
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)
        return logging.handlers.RotatingFileHandler._open(self)
| mit |
aeischeid/servo | components/script/dom/bindings/codegen/parser/WebIDL.py | 1 | 257429 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" A WebIDL parser. """
from ply import lex, yacc
import re
import os
import traceback
import math
from collections import defaultdict
# Machinery
def parseInt(literal):
    """Parse a WebIDL integer literal: decimal, 0-prefixed octal, or
    0x/0X-prefixed hexadecimal, with an optional leading minus sign."""
    text = literal
    if text[0] == '-':
        sign = -1
        text = text[1:]
    else:
        sign = 1

    if text[0] == '0' and len(text) > 1:
        if text[1] in ('x', 'X'):
            base = 16
            text = text[2:]
        else:
            base = 8
            text = text[1:]
    else:
        base = 10

    return sign * int(text, base)
# Magic for creating enums
def M_add_class_attribs(attribs, start):
    """Return a metaclass-style factory that assigns each name in
    *attribs* a sequential integer starting at *start*, plus a
    ``length`` attribute one past the last value."""
    def make_class(name, bases, dict_):
        for offset, attr in enumerate(attribs):
            dict_[attr] = start + offset
        assert 'length' not in dict_
        dict_['length'] = start + len(attribs)
        return type(name, bases, dict_)
    return make_class
def enum(*names, **kw):
    # Build a read-only enum-like instance whose attributes are sequential
    # integers.  Passing base=<existing enum instance> extends it, with
    # numbering continuing from base.length.
    # NOTE(review): this relies on the Python 2 ``__metaclass__`` hook;
    # under Python 3 the metaclass assignment is ignored and no attributes
    # would be created -- this file targets Python 2.
    if len(kw) == 1:
        base = kw['base'].__class__
        start = base.length
    else:
        assert len(kw) == 0
        base = object
        start = 0

    class Foo(base):
        __metaclass__ = M_add_class_attribs(names, start)

        def __setattr__(self, name, value):  # this makes it read-only
            raise NotImplementedError

    return Foo()
class WebIDLError(Exception):
    """An error (or warning) raised during WebIDL parsing, carrying the
    source locations involved."""

    def __init__(self, message, locations, warning=False):
        self.message = message
        self.locations = [str(loc) for loc in locations]
        self.warning = warning

    def __str__(self):
        severity = 'warning' if self.warning else 'error'
        if self.locations:
            return "%s: %s, %s" % (severity, self.message,
                                   "\n".join(self.locations))
        return "%s: %s" % (severity, self.message)
class Location(object):
    """A position in a parsed IDL file.  Line/column resolution is lazy
    (resolve()) because it requires scanning the lexer data."""

    def __init__(self, lexer, lineno, lexpos, filename):
        self._line = None
        self._lineno = lineno
        self._lexpos = lexpos
        self._lexdata = lexer.lexdata
        self._file = filename if filename else "<unknown>"

    def __eq__(self, other):
        # Two locations are equal when they point at the same offset of
        # the same file; line/column need not be resolved for this.
        return (self._lexpos == other._lexpos and
                self._file == other._file)

    def filename(self):
        return self._file

    def resolve(self):
        # Lazily compute the source line text, column, and adjusted line
        # number; idempotent once _line is set.
        if self._line:
            return

        startofline = self._lexdata.rfind('\n', 0, self._lexpos) + 1
        # Clip the displayed line at 80 characters past the position.
        endofline = self._lexdata.find('\n', self._lexpos, self._lexpos + 80)
        if endofline != -1:
            self._line = self._lexdata[startofline:endofline]
        else:
            self._line = self._lexdata[startofline:]
        self._colno = self._lexpos - startofline

        # Our line number seems to point to the start of self._lexdata
        self._lineno += self._lexdata.count('\n', 0, startofline)

    def get(self):
        self.resolve()
        return "%s line %s:%s" % (self._file, self._lineno, self._colno)

    def _pointerline(self):
        # A caret positioned under the offending column.
        return " " * self._colno + "^"

    def __str__(self):
        self.resolve()
        return "%s line %s:%s\n%s\n%s" % (self._file, self._lineno, self._colno,
                                          self._line, self._pointerline())
class BuiltinLocation(object):
    """Placeholder location for objects defined by the parser itself
    rather than by any source file."""

    def __init__(self, text):
        self.msg = text + "\n"

    def __eq__(self, other):
        if not isinstance(other, BuiltinLocation):
            return False
        return self.msg == other.msg

    def filename(self):
        return '<builtin>'

    def resolve(self):
        # Nothing to resolve for a synthetic location.
        pass

    def get(self):
        return self.msg

    def __str__(self):
        return self.get()
# Data Model
class IDLObject(object):
    """Root of the IDL object hierarchy: tracks a source location, holds
    arbitrary per-object user data, and provides type predicates plus
    dependency collection."""

    def __init__(self, location):
        self.location = location
        self.userData = dict()

    def filename(self):
        return self.location.filename()

    # Type predicates; subclasses override the ones that apply to them.
    def isInterface(self):
        return False

    def isEnum(self):
        return False

    def isCallback(self):
        return False

    def isType(self):
        return False

    def isDictionary(self):
        return False

    def isUnion(self):
        return False

    def isTypedef(self):
        return False

    def getUserData(self, key, default):
        return self.userData.get(key, default)

    def setUserData(self, key, value):
        self.userData[key] = value

    def addExtendedAttributes(self, attrs):
        assert False  # Override me!

    def handleExtendedAttribute(self, attr):
        assert False  # Override me!

    def _getDependentObjects(self):
        assert False  # Override me!

    def getDeps(self, visited=None):
        """ Return a set of files that this object depends on.  If any of
            these files are changed the parser needs to be rerun to regenerate
            a new IDLObject.

            The visited argument is a set of all the objects already visited.
            We must test to see if we are in it, and if so, do nothing.  This
            prevents infinite recursion."""

        # visited defaults to None (not set()) because a mutable default
        # would be shared across every invocation.
        if visited is None:
            visited = set()

        if self in visited:
            return set()
        visited.add(self)

        deps = set()
        if self.filename() != "<builtin>":
            deps.add(self.filename())

        for dep in self._getDependentObjects():
            deps.update(dep.getDeps(visited))

        return deps
class IDLScope(IDLObject):
def __init__(self, location, parentScope, identifier):
IDLObject.__init__(self, location)
self.parentScope = parentScope
if identifier:
assert isinstance(identifier, IDLIdentifier)
self._name = identifier
else:
self._name = None
self._dict = {}
self.globalNames = set()
# A mapping from global name to the set of global interfaces
# that have that global name.
self.globalNameMapping = defaultdict(set)
self.primaryGlobalAttr = None
self.primaryGlobalName = None
def __str__(self):
return self.QName()
def QName(self):
if self._name:
return self._name.QName() + "::"
return "::"
def ensureUnique(self, identifier, object):
"""
Ensure that there is at most one 'identifier' in scope ('self').
Note that object can be None. This occurs if we end up here for an
interface type we haven't seen yet.
"""
assert isinstance(identifier, IDLUnresolvedIdentifier)
assert not object or isinstance(object, IDLObjectWithIdentifier)
assert not object or object.identifier == identifier
if identifier.name in self._dict:
if not object:
return
# ensureUnique twice with the same object is not allowed
assert id(object) != id(self._dict[identifier.name])
replacement = self.resolveIdentifierConflict(self, identifier,
self._dict[identifier.name],
object)
self._dict[identifier.name] = replacement
return
assert object
self._dict[identifier.name] = object
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
if (isinstance(originalObject, IDLExternalInterface) and
isinstance(newObject, IDLExternalInterface) and
originalObject.identifier.name == newObject.identifier.name):
return originalObject
if (isinstance(originalObject, IDLExternalInterface) or
isinstance(newObject, IDLExternalInterface)):
raise WebIDLError(
"Name collision between "
"interface declarations for identifier '%s' at '%s' and '%s'"
% (identifier.name,
originalObject.location, newObject.location), [])
if (isinstance(originalObject, IDLDictionary) or
isinstance(newObject, IDLDictionary)):
raise WebIDLError(
"Name collision between dictionary declarations for "
"identifier '%s'.\n%s\n%s"
% (identifier.name,
originalObject.location, newObject.location), [])
# We do the merging of overloads here as opposed to in IDLInterface
# because we need to merge overloads of NamedConstructors and we need to
# detect conflicts in those across interfaces. See also the comment in
# IDLInterface.addExtendedAttributes for "NamedConstructor".
if (originalObject.tag == IDLInterfaceMember.Tags.Method and
newObject.tag == IDLInterfaceMember.Tags.Method):
return originalObject.addOverload(newObject)
# Default to throwing, derived classes can override.
conflictdesc = "\n\t%s at %s\n\t%s at %s" % (originalObject,
originalObject.location,
newObject,
newObject.location)
raise WebIDLError(
"Multiple unresolvable definitions of identifier '%s' in scope '%s%s"
% (identifier.name, str(self), conflictdesc), [])
def _lookupIdentifier(self, identifier):
    # Plain table lookup; raises KeyError when the name is not bound in
    # this scope. Subclasses may override for custom resolution.
    table = self._dict
    return table[identifier.name]
def lookupIdentifier(self, identifier):
    # Public lookup entry point: only identifiers that have already been
    # resolved into this very scope may be looked up here.
    assert isinstance(identifier, IDLIdentifier)
    assert identifier.scope == self
    return self._lookupIdentifier(identifier)
class IDLIdentifier(IDLObject):
    """A name that has been resolved into a particular IDLScope.

    Identity (hashing/equality) is based on the fully qualified name, so
    two identifiers with the same name in the same scope compare equal.
    """

    def __init__(self, location, scope, name):
        IDLObject.__init__(self, location)

        self.name = name
        assert isinstance(scope, IDLScope)
        self.scope = scope

    def __str__(self):
        return self.QName()

    def QName(self):
        # Fully qualified name: the scope's qualified name plus our own.
        return self.scope.QName() + self.name

    def __hash__(self):
        return hash(self.QName())

    def __eq__(self, other):
        return self.QName() == other.QName()

    def object(self):
        """Return the object this identifier denotes within its scope."""
        return self.scope.lookupIdentifier(self)
class IDLUnresolvedIdentifier(IDLObject):
    """A name that has not yet been bound to any scope.

    The Web IDL identifier sanity checks (reserved names, double-underscore
    restriction, single-leading-underscore escaping) run at construction.
    """

    def __init__(self, location, name, allowDoubleUnderscore=False,
                 allowForbidden=False):
        IDLObject.__init__(self, location)

        assert len(name) > 0

        if name == "__noSuchMethod__":
            raise WebIDLError("__noSuchMethod__ is deprecated", [location])

        if not allowDoubleUnderscore:
            if name.startswith("__") and name != "__content":
                raise WebIDLError("Identifiers beginning with __ are reserved",
                                  [location])
            if name.startswith("_"):
                # A single leading underscore escapes a reserved word in
                # Web IDL; strip it to obtain the real name.
                name = name[1:]
        # TODO: Bug 872377, Restore "toJSON" to below list.
        # We sometimes need custom serialization, so allow toJSON for now.
        if (name in ["constructor", "toString"] and
            not allowForbidden):
            raise WebIDLError("Cannot use reserved identifier '%s'" % (name),
                              [location])

        self.name = name

    def __str__(self):
        return self.QName()

    def QName(self):
        return "<unresolved scope>::" + self.name

    def resolve(self, scope, object):
        """Bind this name inside `scope`, returning the resolved
        IDLIdentifier and stamping it onto `object` when one is given."""
        assert isinstance(scope, IDLScope)
        assert not object or isinstance(object, IDLObjectWithIdentifier)
        assert not object or object.identifier == self

        scope.ensureUnique(self, object)

        resolved = IDLIdentifier(self.location, scope, self.name)
        if object:
            object.identifier = resolved
        return resolved

    def finish(self):
        # Unresolved identifiers must never survive to the finish pass.
        assert False  # Should replace with a resolved identifier first.
class IDLObjectWithIdentifier(IDLObject):
    """An IDLObject that carries an identifier and can resolve it in a
    scope.

    Also owns the TreatNullAs string-handling state shared by arguments
    and attributes.
    """

    def __init__(self, location, parentScope, identifier):
        IDLObject.__init__(self, location)

        assert isinstance(identifier, IDLUnresolvedIdentifier)

        self.identifier = identifier

        if parentScope:
            self.resolve(parentScope)

        # How a null value is handled when assigned to a DOMString; set by
        # checkForStringHandlingExtendedAttributes below.
        self.treatNullAs = "Default"

    def resolve(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(self.identifier, IDLUnresolvedIdentifier)
        self.identifier.resolve(parentScope, self)

    def checkForStringHandlingExtendedAttributes(self, attrs,
                                                 isDictionaryMember=False,
                                                 isOptional=False):
        """
        A helper function to deal with TreatNullAs. Returns the list
        of attrs it didn't handle itself.
        """
        assert isinstance(self, IDLArgument) or isinstance(self, IDLAttribute)
        unhandledAttrs = []
        for attr in attrs:
            if not attr.hasValue():
                unhandledAttrs.append(attr)
                continue

            if attr.identifier() != "TreatNullAs":
                unhandledAttrs.append(attr)
                continue

            value = attr.value()
            if not self.type.isDOMString() or self.type.nullable():
                raise WebIDLError("[TreatNullAs] is only allowed on "
                                  "arguments or attributes whose type is "
                                  "DOMString",
                                  [self.location])
            if isDictionaryMember:
                raise WebIDLError("[TreatNullAs] is not allowed for "
                                  "dictionary members", [self.location])
            if value != 'EmptyString':
                raise WebIDLError("[TreatNullAs] must take the identifier "
                                  "'EmptyString', not '%s'" % value,
                                  [self.location])
            self.treatNullAs = value

        return unhandledAttrs
class IDLObjectWithScope(IDLObjectWithIdentifier, IDLScope):
    """An IDL object that both has an identifier of its own and acts as a
    scope for nested identifiers (e.g. an interface containing members)."""

    def __init__(self, location, parentScope, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)

        # Resolve our own identifier in the parent scope first; IDLScope
        # then uses the resolved identifier for our qualified name.
        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
        IDLScope.__init__(self, location, parentScope, self.identifier)
class IDLIdentifierPlaceholder(IDLObjectWithIdentifier):
    """A forward reference to a name that is resolved against a scope
    later, once the whole input has been parsed."""

    def __init__(self, location, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)

    def finish(self, scope):
        """Resolve the placeholder in `scope`.

        Raises WebIDLError when the name is not defined in the scope.
        """
        try:
            scope._lookupIdentifier(self.identifier)
        except Exception:
            # The scope's identifier table raises KeyError for unknown
            # names. This was previously a bare `except:`, which would also
            # swallow KeyboardInterrupt/SystemExit; narrowed to Exception.
            raise WebIDLError("Unresolved type '%s'." % self.identifier,
                              [self.location])

        obj = self.identifier.resolve(scope, None)
        return scope.lookupIdentifier(obj)
class IDLExposureMixins():
    """Mixin tracking in which global scopes an IDL construct is exposed,
    driven by the [Exposed] extended attribute."""

    def __init__(self, location):
        # _exposureGlobalNames are the global names listed in our [Exposed]
        # extended attribute. exposureSet is the exposure set as defined in
        # the Web IDL spec: it contains interface names.
        self._exposureGlobalNames = set()
        self.exposureSet = set()
        self._location = location
        self._globalScope = None

    def finish(self, scope):
        assert scope.parentScope is None
        self._globalScope = scope

        # Verify that our [Exposed] value, if any, makes sense.
        for globalName in self._exposureGlobalNames:
            if globalName not in scope.globalNames:
                raise WebIDLError("Unknown [Exposed] value %s" % globalName,
                                  [self._location])

        if not self._exposureGlobalNames:
            # No [Exposed] attribute: default to the primary global.
            self._exposureGlobalNames.add(scope.primaryGlobalName)

        globalNameSetToExposureSet(scope, self._exposureGlobalNames,
                                   self.exposureSet)

    def isExposedInWindow(self):
        return 'Window' in self.exposureSet

    def isExposedOnMainThread(self):
        return self.isExposedInWindow() or self.isExposedInSystemGlobals()

    def isExposedInAnyWorker(self):
        return bool(self.getWorkerExposureSet())

    def isExposedInWorkerDebugger(self):
        return bool(self.getWorkerDebuggerExposureSet())

    def isExposedInSystemGlobals(self):
        return 'BackstagePass' in self.exposureSet

    def isExposedInSomeButNotAllWorkers(self):
        """
        Returns true if the Exposed extended attribute for this interface
        exposes it in some worker globals but not others. The return value
        does not depend on whether the interface is exposed in Window or
        System globals.
        """
        if not self.isExposedInAnyWorker():
            return False
        # NOTE(review): this reads self.parentScope, unlike the other worker
        # helpers which use self._globalScope — confirm that every class
        # mixing this in provides parentScope.
        workerScopes = self.parentScope.globalNameMapping["Worker"]
        return len(workerScopes.difference(self.exposureSet)) > 0

    def getWorkerExposureSet(self):
        workerScopes = self._globalScope.globalNameMapping["Worker"]
        return workerScopes.intersection(self.exposureSet)

    def getWorkerDebuggerExposureSet(self):
        debuggerScopes = self._globalScope.globalNameMapping["WorkerDebugger"]
        return debuggerScopes.intersection(self.exposureSet)
class IDLExternalInterface(IDLObjectWithIdentifier, IDLExposureMixins):
    """A forward-declared interface (`interface Foo;`) whose definition
    lives outside the parsed IDL. Most predicates are hard-wired stubs."""

    def __init__(self, location, parentScope, identifier):
        assert isinstance(identifier, IDLUnresolvedIdentifier)
        assert isinstance(parentScope, IDLScope)
        self.parent = None
        IDLObjectWithIdentifier.__init__(self, location, parentScope, identifier)
        IDLExposureMixins.__init__(self, location)
        IDLObjectWithIdentifier.resolve(self, parentScope)

    def finish(self, scope):
        # Only exposure information needs finishing; there are no members.
        IDLExposureMixins.finish(self, scope)
        pass  # NOTE(review): redundant trailing `pass`.

    def validate(self):
        pass

    def isIteratorInterface(self):
        return False

    def isExternal(self):
        return True

    def isInterface(self):
        return True

    def isConsequential(self):
        return False

    def addExtendedAttributes(self, attrs):
        # External interfaces accept no extended attributes.
        assert len(attrs) == 0

    def resolve(self, parentScope):
        # Already resolved in __init__; nothing to do on later passes.
        pass

    def getJSImplementation(self):
        return None

    def isJSImplemented(self):
        return False

    def isProbablyShortLivingObject(self):
        return False

    def getNavigatorProperty(self):
        return None

    def _getDependentObjects(self):
        return set()
class IDLPartialInterface(IDLObject):
    """Represents a `partial interface Foo { ... };` declaration, which
    contributes members (and some extended attributes) to a non-partial
    interface declared elsewhere."""

    def __init__(self, location, name, members, nonPartialInterface):
        assert isinstance(name, IDLUnresolvedIdentifier)

        IDLObject.__init__(self, location)
        self.identifier = name
        self.members = members
        # propagatedExtendedAttrs are the ones that should get
        # propagated to our non-partial interface.
        self.propagatedExtendedAttrs = []
        self._nonPartialInterface = nonPartialInterface
        self._finished = False
        nonPartialInterface.addPartialInterface(self)

    def addExtendedAttributes(self, attrs):
        for attr in attrs:
            identifier = attr.identifier()

            if identifier in ["Constructor", "NamedConstructor"]:
                self.propagatedExtendedAttrs.append(attr)
                continue

            if identifier != "Exposed":
                raise WebIDLError("Unknown extended attribute %s on partial "
                                  "interface" % identifier,
                                  [attr.location])

            # [Exposed] just gets propagated to all our members.
            for member in self.members:
                if len(member._exposureGlobalNames) != 0:
                    raise WebIDLError("[Exposed] specified on both a "
                                      "partial interface member and on the "
                                      "partial interface itself",
                                      [member.location, attr.location])
                member.addExtendedAttributes([attr])

    def finish(self, scope):
        if self._finished:
            return
        self._finished = True
        # Need to make sure our non-partial interface gets finished so it
        # can report cases when we only have partial interfaces.
        self._nonPartialInterface.finish(scope)

    def validate(self):
        pass
def convertExposedAttrToGlobalNameSet(exposedAttr, targetSet):
    """Populate `targetSet` with the global names named by an [Exposed]
    extended attribute (either [Exposed=Name] or [Exposed=(A,B)])."""
    assert len(targetSet) == 0
    if exposedAttr.hasValue():
        # Single-identifier form: [Exposed=Window]
        targetSet.add(exposedAttr.value())
        return
    # List form: [Exposed=(Window,Worker)]
    assert exposedAttr.hasArgs()
    targetSet.update(exposedAttr.args())
def globalNameSetToExposureSet(globalScope, nameSet, exposureSet):
    """Expand global names into the concrete interface names they map to,
    accumulating the result into `exposureSet`."""
    mapping = globalScope.globalNameMapping
    for globalName in nameSet:
        exposureSet |= mapping[globalName]
class IDLInterface(IDLObjectWithScope, IDLExposureMixins):
def __init__(self, location, parentScope, name, parent, members,
             isKnownNonPartial):
    """Create an interface.

    parent is an IDLIdentifierPlaceholder (resolved in finish) or None;
    isKnownNonPartial is False when only partial declarations have been
    seen so far, in which case parent and members must be empty.
    """
    assert isinstance(parentScope, IDLScope)
    assert isinstance(name, IDLUnresolvedIdentifier)
    assert isKnownNonPartial or not parent
    assert isKnownNonPartial or len(members) == 0

    self.parent = None
    self._callback = False
    self._finished = False
    self.members = []
    self.maplikeOrSetlikeOrIterable = None
    self._partialInterfaces = []
    self._extendedAttrDict = {}
    # namedConstructors needs deterministic ordering because bindings code
    # outputs the constructs in the order that namedConstructors enumerates
    # them.
    self.namedConstructors = list()
    self.implementedInterfaces = set()
    self._consequential = False
    self._isKnownNonPartial = False
    # self.interfacesBasedOnSelf is the set of interfaces that inherit from
    # self or have self as a consequential interface, including self itself.
    # Used for distinguishability checking.
    self.interfacesBasedOnSelf = set([self])
    # self.interfacesImplementingSelf is the set of interfaces that directly
    # have self as a consequential interface
    self.interfacesImplementingSelf = set()
    self._hasChildInterfaces = False
    self._isOnGlobalProtoChain = False
    # Tracking of the number of reserved slots we need for our
    # members and those of ancestor interfaces.
    self.totalMembersInSlots = 0
    # Tracking of the number of own own members we have in slots
    self._ownMembersInSlots = 0
    # If this is an iterator interface, we need to know what iterable
    # interface we're iterating for in order to get its nativeType.
    self.iterableInterface = None

    IDLObjectWithScope.__init__(self, location, parentScope, name)
    IDLExposureMixins.__init__(self, location)

    if isKnownNonPartial:
        self.setNonPartial(location, parent, members)
def __str__(self):
    # Human-readable form used when interfaces appear in error messages.
    return "Interface '%s'" % (self.identifier.name,)
def ctor(self):
    """Return this interface's constructor method, or None when no
    constructor has been resolved into this scope."""
    identifier = IDLUnresolvedIdentifier(self.location, "constructor",
                                         allowForbidden=True)
    try:
        return self._lookupIdentifier(identifier)
    except Exception:
        # A missing "constructor" entry raises KeyError from the scope's
        # identifier table. This was previously a bare `except:`, which
        # would also swallow KeyboardInterrupt/SystemExit.
        return None
def isIterable(self):
    # True when this interface carries an `iterable<...>` declaration
    # (as opposed to maplike/setlike, which share the same slot).
    decl = self.maplikeOrSetlikeOrIterable
    return decl and decl.isIterable()
def isIteratorInterface(self):
    # iterableInterface is set (in __init__ it defaults to None) when this
    # interface was synthesized to iterate some iterable interface.
    return self.iterableInterface is not None
def resolveIdentifierConflict(self, scope, identifier, originalObject, newObject):
    """Handle a member-name conflict by delegating to IDLScope (which
    merges method overloads), then dropping the now-merged newObject from
    our member list."""
    assert isinstance(scope, IDLScope)
    assert isinstance(originalObject, IDLInterfaceMember)
    assert isinstance(newObject, IDLInterfaceMember)
    retval = IDLScope.resolveIdentifierConflict(self, scope, identifier,
                                                originalObject, newObject)
    # Might be a ctor, which isn't in self.members
    if newObject in self.members:
        self.members.remove(newObject)
    return retval
def finish(self, scope):
    """Second-pass processing for the interface.

    Merges partial interfaces, expands maplike/setlike/iterable members,
    resolves the parent and consequential ("implements") interfaces,
    finishes all members, imports [Unforgeable] members from the parent,
    assigns reserved-slot indices, and enforces the structural restrictions
    visible below. Idempotent via self._finished.
    """
    if self._finished:
        return

    self._finished = True

    if not self._isKnownNonPartial:
        raise WebIDLError("Interface %s does not have a non-partial "
                          "declaration" % self.identifier.name,
                          [self.location])

    IDLExposureMixins.finish(self, scope)

    # Now go ahead and merge in our partial interfaces.
    for partial in self._partialInterfaces:
        partial.finish(scope)
        self.addExtendedAttributes(partial.propagatedExtendedAttrs)
        self.members.extend(partial.members)

    # Generate maplike/setlike interface members. Since generated members
    # need to be treated like regular interface members, do this before
    # things like exposure setting.
    for member in self.members:
        if member.isMaplikeOrSetlikeOrIterable():
            # Check that we only have one interface declaration (currently
            # there can only be one maplike/setlike declaration per
            # interface)
            if self.maplikeOrSetlikeOrIterable:
                raise WebIDLError("%s declaration used on "
                                  "interface that already has %s "
                                  "declaration" %
                                  (member.maplikeOrSetlikeOrIterableType,
                                   self.maplikeOrSetlikeOrIterable.maplikeOrSetlikeOrIterableType),
                                  [self.maplikeOrSetlikeOrIterable.location,
                                   member.location])
            self.maplikeOrSetlikeOrIterable = member
            # If we've got a maplike or setlike declaration, we'll be building all of
            # our required methods in Codegen. Generate members now.
            self.maplikeOrSetlikeOrIterable.expand(self.members, self.isJSImplemented())

    # Now that we've merged in our partial interfaces, set the
    # _exposureGlobalNames on any members that don't have it set yet. Note
    # that any partial interfaces that had [Exposed] set have already set up
    # _exposureGlobalNames on all the members coming from them, so this is
    # just implementing the "members default to interface that defined them"
    # and "partial interfaces default to interface they're a partial for"
    # rules from the spec.
    for m in self.members:
        # If m, or the partial interface m came from, had [Exposed]
        # specified, it already has a nonempty exposure global names set.
        if len(m._exposureGlobalNames) == 0:
            m._exposureGlobalNames.update(self._exposureGlobalNames)

    assert not self.parent or isinstance(self.parent, IDLIdentifierPlaceholder)
    parent = self.parent.finish(scope) if self.parent else None
    if parent and isinstance(parent, IDLExternalInterface):
        raise WebIDLError("%s inherits from %s which does not have "
                          "a definition" %
                          (self.identifier.name,
                           self.parent.identifier.name),
                          [self.location])
    assert not parent or isinstance(parent, IDLInterface)

    self.parent = parent

    assert iter(self.members)

    if self.parent:
        self.parent.finish(scope)
        self.parent._hasChildInterfaces = True

        self.totalMembersInSlots = self.parent.totalMembersInSlots

        # Interfaces with [Global] or [PrimaryGlobal] must not
        # have anything inherit from them
        if (self.parent.getExtendedAttribute("Global") or
            self.parent.getExtendedAttribute("PrimaryGlobal")):
            # Note: This is not a self.parent.isOnGlobalProtoChain() check
            # because ancestors of a [Global] interface can have other
            # descendants.
            raise WebIDLError("[Global] interface has another interface "
                              "inheriting from it",
                              [self.location, self.parent.location])

        # Make sure that we're not exposed in places where our parent is not
        if not self.exposureSet.issubset(self.parent.exposureSet):
            raise WebIDLError("Interface %s is exposed in globals where its "
                              "parent interface %s is not exposed." %
                              (self.identifier.name,
                               self.parent.identifier.name),
                              [self.location, self.parent.location])

        # Callbacks must not inherit from non-callbacks or inherit from
        # anything that has consequential interfaces.
        # XXXbz Can non-callbacks inherit from callbacks?  Spec issue pending.
        # XXXbz Can callbacks have consequential interfaces?  Spec issue pending
        if self.isCallback():
            if not self.parent.isCallback():
                raise WebIDLError("Callback interface %s inheriting from "
                                  "non-callback interface %s" %
                                  (self.identifier.name,
                                   self.parent.identifier.name),
                                  [self.location, self.parent.location])
        elif self.parent.isCallback():
            raise WebIDLError("Non-callback interface %s inheriting from "
                              "callback interface %s" %
                              (self.identifier.name,
                               self.parent.identifier.name),
                              [self.location, self.parent.location])

        # Interfaces which have interface objects can't inherit
        # from [NoInterfaceObject] interfaces.
        if (self.parent.getExtendedAttribute("NoInterfaceObject") and
            not self.getExtendedAttribute("NoInterfaceObject")):
            raise WebIDLError("Interface %s does not have "
                              "[NoInterfaceObject] but inherits from "
                              "interface %s which does" %
                              (self.identifier.name,
                               self.parent.identifier.name),
                              [self.location, self.parent.location])

    for iface in self.implementedInterfaces:
        iface.finish(scope)

    cycleInGraph = self.findInterfaceLoopPoint(self)
    if cycleInGraph:
        raise WebIDLError("Interface %s has itself as ancestor or "
                          "implemented interface" % self.identifier.name,
                          [self.location, cycleInGraph.location])

    if self.isCallback():
        # "implements" should have made sure we have no
        # consequential interfaces.
        assert len(self.getConsequentialInterfaces()) == 0
        # And that we're not consequential.
        assert not self.isConsequential()

    # Now resolve() and finish() our members before importing the
    # ones from our implemented interfaces.

    # resolve() will modify self.members, so we need to iterate
    # over a copy of the member list here.
    for member in list(self.members):
        member.resolve(self)

    for member in self.members:
        member.finish(scope)

    # Now that we've finished our members, which has updated their exposure
    # sets, make sure they aren't exposed in places where we are not.
    for member in self.members:
        if not member.exposureSet.issubset(self.exposureSet):
            raise WebIDLError("Interface member has larger exposure set "
                              "than the interface itself",
                              [member.location, self.location])

    ctor = self.ctor()
    if ctor is not None:
        assert len(ctor._exposureGlobalNames) == 0
        ctor._exposureGlobalNames.update(self._exposureGlobalNames)
        ctor.finish(scope)

    for ctor in self.namedConstructors:
        assert len(ctor._exposureGlobalNames) == 0
        ctor._exposureGlobalNames.update(self._exposureGlobalNames)
        ctor.finish(scope)

    # Make a copy of our member list, so things that implement us
    # can get those without all the stuff we implement ourselves
    # admixed.
    self.originalMembers = list(self.members)

    # Import everything from our consequential interfaces into
    # self.members. Sort our consequential interfaces by name
    # just so we have a consistent order.
    # NOTE(review): cmp=cmp is Python 2 only (and redundant given key=);
    # it must be dropped for a Python 3 migration.
    for iface in sorted(self.getConsequentialInterfaces(),
                        cmp=cmp,
                        key=lambda x: x.identifier.name):
        # Flag the interface as being someone's consequential interface
        iface.setIsConsequentialInterfaceOf(self)
        # Verify that we're not exposed somewhere where iface is not exposed
        if not self.exposureSet.issubset(iface.exposureSet):
            raise WebIDLError("Interface %s is exposed in globals where its "
                              "consequential interface %s is not exposed." %
                              (self.identifier.name, iface.identifier.name),
                              [self.location, iface.location])

        # If we have a maplike or setlike, and the consequential interface
        # also does, throw an error.
        if iface.maplikeOrSetlikeOrIterable and self.maplikeOrSetlikeOrIterable:
            raise WebIDLError("Maplike/setlike/iterable interface %s cannot have "
                              "maplike/setlike/iterable interface %s as a "
                              "consequential interface" %
                              (self.identifier.name,
                               iface.identifier.name),
                              [self.maplikeOrSetlikeOrIterable.location,
                               iface.maplikeOrSetlikeOrIterable.location])
        additionalMembers = iface.originalMembers
        for additionalMember in additionalMembers:
            for member in self.members:
                if additionalMember.identifier.name == member.identifier.name:
                    raise WebIDLError(
                        "Multiple definitions of %s on %s coming from 'implements' statements" %
                        (member.identifier.name, self),
                        [additionalMember.location, member.location])
        self.members.extend(additionalMembers)
        iface.interfacesImplementingSelf.add(self)

    for ancestor in self.getInheritedInterfaces():
        ancestor.interfacesBasedOnSelf.add(self)
        if (ancestor.maplikeOrSetlikeOrIterable is not None and
            self.maplikeOrSetlikeOrIterable is not None):
            raise WebIDLError("Cannot have maplike/setlike on %s that "
                              "inherits %s, which is already "
                              "maplike/setlike" %
                              (self.identifier.name,
                               ancestor.identifier.name),
                              [self.maplikeOrSetlikeOrIterable.location,
                               ancestor.maplikeOrSetlikeOrIterable.location])
        for ancestorConsequential in ancestor.getConsequentialInterfaces():
            ancestorConsequential.interfacesBasedOnSelf.add(self)

    # Deal with interfaces marked [Unforgeable], now that we have our full
    # member list, except unforgeables pulled in from parents. We want to
    # do this before we set "originatingInterface" on our unforgeable
    # members.
    if self.getExtendedAttribute("Unforgeable"):
        # Check that the interface already has all the things the
        # spec would otherwise require us to synthesize and is
        # missing the ones we plan to synthesize.
        if not any(m.isMethod() and m.isStringifier() for m in self.members):
            raise WebIDLError("Unforgeable interface %s does not have a "
                              "stringifier" % self.identifier.name,
                              [self.location])

        for m in self.members:
            if ((m.isMethod() and m.isJsonifier()) or
                m.identifier.name == "toJSON"):
                raise WebIDLError("Unforgeable interface %s has a "
                                  "jsonifier so we won't be able to add "
                                  "one ourselves" % self.identifier.name,
                                  [self.location, m.location])

            if m.identifier.name == "valueOf" and not m.isStatic():
                raise WebIDLError("Unforgeable interface %s has a valueOf "
                                  "member so we won't be able to add one "
                                  "ourselves" % self.identifier.name,
                                  [self.location, m.location])

    for member in self.members:
        if ((member.isAttr() or member.isMethod()) and
            member.isUnforgeable() and
            not hasattr(member, "originatingInterface")):
            member.originatingInterface = self

    # Compute slot indices for our members before we pull in unforgeable
    # members from our parent. Also, maplike/setlike declarations get a
    # slot to hold their backing object.
    for member in self.members:
        if ((member.isAttr() and
             (member.getExtendedAttribute("StoreInSlot") or
              member.getExtendedAttribute("Cached"))) or
            member.isMaplikeOrSetlike()):
            if member.slotIndices is None:
                member.slotIndices = dict()
            member.slotIndices[self.identifier.name] = self.totalMembersInSlots
            self.totalMembersInSlots += 1
            if member.getExtendedAttribute("StoreInSlot"):
                self._ownMembersInSlots += 1

    if self.parent:
        # Make sure we don't shadow any of the [Unforgeable] attributes on
        # our ancestor interfaces. We don't have to worry about
        # consequential interfaces here, because those have already been
        # imported into the relevant .members lists. And we don't have to
        # worry about anything other than our parent, because it has already
        # imported its ancestors unforgeable attributes into its member
        # list.
        for unforgeableMember in (member for member in self.parent.members if
                                  (member.isAttr() or member.isMethod()) and
                                  member.isUnforgeable()):
            shadows = [m for m in self.members if
                       (m.isAttr() or m.isMethod()) and
                       not m.isStatic() and
                       m.identifier.name == unforgeableMember.identifier.name]
            if len(shadows) != 0:
                locs = [unforgeableMember.location] + [s.location for s
                                                       in shadows]
                # NOTE(review): `ancestor` here is left over from the
                # getInheritedInterfaces() loop above; this message
                # probably means self.parent.identifier.name — confirm.
                raise WebIDLError("Interface %s shadows [Unforgeable] "
                                  "members of %s" %
                                  (self.identifier.name,
                                   ancestor.identifier.name),
                                  locs)
            # And now just stick it in our members, since we won't be
            # inheriting this down the proto chain. If we really cared we
            # could try to do something where we set up the unforgeable
            # attributes/methods of ancestor interfaces, with their
            # corresponding getters, on our interface, but that gets pretty
            # complicated and seems unnecessary.
            self.members.append(unforgeableMember)

    # At this point, we have all of our members. If the current interface
    # uses maplike/setlike, check for collisions anywhere in the current
    # interface or higher in the inheritance chain.
    if self.maplikeOrSetlikeOrIterable:
        testInterface = self
        isAncestor = False
        while testInterface:
            self.maplikeOrSetlikeOrIterable.checkCollisions(testInterface.members,
                                                            isAncestor)
            isAncestor = True
            testInterface = testInterface.parent

    # Ensure that there's at most one of each {named,indexed}
    # {getter,setter,creator,deleter}, at most one stringifier,
    # and at most one legacycaller. Note that this last is not
    # quite per spec, but in practice no one overloads
    # legacycallers.
    specialMembersSeen = {}
    for member in self.members:
        if not member.isMethod():
            continue

        if member.isGetter():
            memberType = "getters"
        elif member.isSetter():
            memberType = "setters"
        elif member.isCreator():
            memberType = "creators"
        elif member.isDeleter():
            memberType = "deleters"
        elif member.isStringifier():
            memberType = "stringifiers"
        elif member.isJsonifier():
            memberType = "jsonifiers"
        elif member.isLegacycaller():
            memberType = "legacycallers"
        else:
            continue

        if (memberType != "stringifiers" and memberType != "legacycallers" and
            memberType != "jsonifiers"):
            if member.isNamed():
                memberType = "named " + memberType
            else:
                assert member.isIndexed()
                memberType = "indexed " + memberType

        if memberType in specialMembersSeen:
            raise WebIDLError("Multiple " + memberType + " on %s" % (self),
                              [self.location,
                               specialMembersSeen[memberType].location,
                               member.location])

        specialMembersSeen[memberType] = member

    if (self.getExtendedAttribute("LegacyUnenumerableNamedProperties") and
        "named getters" not in specialMembersSeen):
        raise WebIDLError("[LegacyUnenumerableNamedProperties] used on an interface "
                          "without a named getter",
                          [self.location])

    if self._isOnGlobalProtoChain:
        # Make sure we have no named setters, creators, or deleters
        for memberType in ["setter", "creator", "deleter"]:
            memberId = "named " + memberType + "s"
            if memberId in specialMembersSeen:
                raise WebIDLError("Interface with [Global] has a named %s" %
                                  memberType,
                                  [self.location,
                                   specialMembersSeen[memberId].location])

        # Make sure we're not [OverrideBuiltins]
        if self.getExtendedAttribute("OverrideBuiltins"):
            raise WebIDLError("Interface with [Global] also has "
                              "[OverrideBuiltins]",
                              [self.location])

        # Mark all of our ancestors as being on the global's proto chain too
        parent = self.parent
        while parent:
            # Must not inherit from an interface with [OverrideBuiltins]
            if parent.getExtendedAttribute("OverrideBuiltins"):
                raise WebIDLError("Interface with [Global] inherits from "
                                  "interface with [OverrideBuiltins]",
                                  [self.location, parent.location])
            parent._isOnGlobalProtoChain = True
            parent = parent.parent
def validate(self):
    """Third-pass checks, run after finish() has completed everywhere:
    [Unforgeable] restrictions, [PutForwards] chain/cycle checks,
    [Alias] rules, exposure-related restrictions, and iterable vs.
    indexed-getter consistency."""
    # We don't support consequential unforgeable interfaces. Need to check
    # this here, because in finish() an interface might not know yet that
    # it's consequential.
    if self.getExtendedAttribute("Unforgeable") and self.isConsequential():
        raise WebIDLError(
            "%s is an unforgeable consequential interface" %
            self.identifier.name,
            [self.location] +
            list(i.location for i in
                 (self.interfacesBasedOnSelf - {self})))

    # We also don't support inheriting from unforgeable interfaces.
    if self.getExtendedAttribute("Unforgeable") and self.hasChildInterfaces():
        locations = ([self.location] +
                     list(i.location for i in
                          self.interfacesBasedOnSelf if i.parent == self))
        raise WebIDLError("%s is an unforgeable ancestor interface" %
                          self.identifier.name,
                          locations)

    indexedGetter = None
    hasLengthAttribute = False
    for member in self.members:
        member.validate()

        if self.isCallback() and member.getExtendedAttribute("Replaceable"):
            raise WebIDLError("[Replaceable] used on an attribute on "
                              "interface %s which is a callback interface" %
                              self.identifier.name,
                              [self.location, member.location])

        # Check that PutForwards refers to another attribute and that no
        # cycles exist in forwarded assignments.  Also check for a
        # integer-typed "length" attribute.
        if member.isAttr():
            if (member.identifier.name == "length" and
                member.type.isInteger()):
                hasLengthAttribute = True

            iface = self
            attr = member
            putForwards = attr.getExtendedAttribute("PutForwards")
            if putForwards and self.isCallback():
                raise WebIDLError("[PutForwards] used on an attribute "
                                  "on interface %s which is a callback "
                                  "interface" % self.identifier.name,
                                  [self.location, member.location])

            while putForwards is not None:
                forwardIface = attr.type.unroll().inner
                # NOTE(review): "fowardAttr" (sic) is a long-standing local
                # variable typo for "forwardAttr"; behavior is unaffected.
                fowardAttr = None

                for forwardedMember in forwardIface.members:
                    if (not forwardedMember.isAttr() or
                        forwardedMember.identifier.name != putForwards[0]):
                        continue
                    if forwardedMember == member:
                        raise WebIDLError("Cycle detected in forwarded "
                                          "assignments for attribute %s on "
                                          "%s" %
                                          (member.identifier.name, self),
                                          [member.location])
                    fowardAttr = forwardedMember
                    break

                if fowardAttr is None:
                    raise WebIDLError("Attribute %s on %s forwards to "
                                      "missing attribute %s" %
                                      (attr.identifier.name, iface, putForwards),
                                      [attr.location])

                iface = forwardIface
                attr = fowardAttr
                putForwards = attr.getExtendedAttribute("PutForwards")

        # Check that the name of an [Alias] doesn't conflict with an
        # interface member and whether we support indexed properties.
        if member.isMethod():
            if member.isGetter() and member.isIndexed():
                indexedGetter = member

            for alias in member.aliases:
                if self.isOnGlobalProtoChain():
                    raise WebIDLError("[Alias] must not be used on a "
                                      "[Global] interface operation",
                                      [member.location])
                if (member.getExtendedAttribute("Exposed") or
                    member.getExtendedAttribute("ChromeOnly") or
                    member.getExtendedAttribute("Pref") or
                    member.getExtendedAttribute("Func") or
                    member.getExtendedAttribute("AvailableIn") or
                    member.getExtendedAttribute("CheckAnyPermissions") or
                    member.getExtendedAttribute("CheckAllPermissions")):
                    raise WebIDLError("[Alias] must not be used on a "
                                      "conditionally exposed operation",
                                      [member.location])
                if member.isStatic():
                    raise WebIDLError("[Alias] must not be used on a "
                                      "static operation",
                                      [member.location])
                if member.isIdentifierLess():
                    raise WebIDLError("[Alias] must not be used on an "
                                      "identifierless operation",
                                      [member.location])
                if member.isUnforgeable():
                    raise WebIDLError("[Alias] must not be used on an "
                                      "[Unforgeable] operation",
                                      [member.location])
                for m in self.members:
                    if m.identifier.name == alias:
                        raise WebIDLError("[Alias=%s] has same name as "
                                          "interface member" % alias,
                                          [member.location, m.location])
                    if m.isMethod() and m != member and alias in m.aliases:
                        raise WebIDLError("duplicate [Alias=%s] definitions" %
                                          alias,
                                          [member.location, m.location])

    if (self.getExtendedAttribute("Pref") and
        self._exposureGlobalNames != set([self.parentScope.primaryGlobalName])):
        raise WebIDLError("[Pref] used on an interface that is not %s-only" %
                          self.parentScope.primaryGlobalName,
                          [self.location])

    for attribute in ["CheckAnyPermissions", "CheckAllPermissions"]:
        if (self.getExtendedAttribute(attribute) and
            self._exposureGlobalNames != set([self.parentScope.primaryGlobalName])):
            raise WebIDLError("[%s] used on an interface that is "
                              "not %s-only" %
                              (attribute, self.parentScope.primaryGlobalName),
                              [self.location])

    # Conditional exposure makes no sense for interfaces with no
    # interface object, unless they're navigator properties.
    if (self.isExposedConditionally() and
        not self.hasInterfaceObject() and
        not self.getNavigatorProperty()):
        raise WebIDLError("Interface with no interface object is "
                          "exposed conditionally",
                          [self.location])

    # Value iterators are only allowed on interfaces with indexed getters,
    # and pair iterators are only allowed on interfaces without indexed
    # getters.
    if self.isIterable():
        iterableDecl = self.maplikeOrSetlikeOrIterable
        if iterableDecl.isValueIterator():
            if not indexedGetter:
                raise WebIDLError("Interface with value iterator does not "
                                  "support indexed properties",
                                  [self.location])

            if iterableDecl.valueType != indexedGetter.signatures()[0][0]:
                raise WebIDLError("Iterable type does not match indexed "
                                  "getter type",
                                  [iterableDecl.location,
                                   indexedGetter.location])

            if not hasLengthAttribute:
                raise WebIDLError('Interface with value iterator does not '
                                  'have an integer-typed "length" attribute',
                                  [self.location])
        else:
            assert iterableDecl.isPairIterator()
            if indexedGetter:
                raise WebIDLError("Interface with pair iterator supports "
                                  "indexed properties",
                                  [self.location, iterableDecl.location,
                                   indexedGetter.location])
def isInterface(self):
    # IDLInterface objects always report themselves as interfaces.
    return True
def isExternal(self):
    # Unlike IDLExternalInterface, this interface has a full definition.
    return False
def setIsConsequentialInterfaceOf(self, other):
    # Mark this interface as pulled into `other` via an "implements"
    # statement; `other` is then considered to be based on us.
    self._consequential = True
    self.interfacesBasedOnSelf.add(other)
def isConsequential(self):
    # Set via setIsConsequentialInterfaceOf when some interface implements us.
    return self._consequential
def setCallback(self, value):
    # Record whether this was declared as `callback interface`.
    self._callback = value
def isCallback(self):
    # True when this was declared as `callback interface`.
    return self._callback
def isSingleOperationInterface(self):
    """Return True when this callback interface is a "single operation"
    interface: no inheritance, no consequential interfaces, no attributes,
    and all regular (non-static) operations share one identifier."""
    assert self.isCallback() or self.isJSImplemented()

    # JS-implemented things should never need the this-handling weirdness
    # of single-operation interfaces.
    if self.isJSImplemented():
        return False
    # Not inheriting from another interface
    if self.parent:
        return False
    # No consequential interfaces
    if len(self.getConsequentialInterfaces()) != 0:
        return False
    # No attributes of any kinds
    if any(m.isAttr() for m in self.members):
        return False
    # There is at least one regular operation, and all regular operations
    # have the same identifier
    operationNames = set(m.identifier.name for m in self.members
                         if m.isMethod() and not m.isStatic())
    return len(operationNames) == 1
def inheritanceDepth(self):
depth = 0
parent = self.parent
while parent:
depth = depth + 1
parent = parent.parent
return depth
    def hasConstants(self):
        return any(m.isConst() for m in self.members)
    def hasInterfaceObject(self):
        # Callback interfaces get an interface object only when they
        # declare constants; otherwise honor [NoInterfaceObject].
        if self.isCallback():
            return self.hasConstants()
        return not hasattr(self, "_noInterfaceObject")
    def hasInterfacePrototypeObject(self):
        # Callbacks have no prototype object; others only need one when a
        # concrete descendant exists (per the 'hasConcreteDescendant' flag).
        return not self.isCallback() and self.getUserData('hasConcreteDescendant', False)
    def addExtendedAttributes(self, attrs):
        """Validate and record the extended attributes applied to this
        interface.

        Attributes with interface-level meaning ([NoInterfaceObject],
        [Constructor]/[NamedConstructor]/[ChromeConstructor], [Global],
        [PrimaryGlobal], [Exposed], ...) are special-cased; everything else
        is checked against known no-argument / string-valued lists.  Each
        attribute's value list (or True) ends up in _extendedAttrDict.
        """
        for attr in attrs:
            identifier = attr.identifier()
            # Special cased attrs
            if identifier == "TreatNonCallableAsNull":
                raise WebIDLError("TreatNonCallableAsNull cannot be specified on interfaces",
                                  [attr.location, self.location])
            if identifier == "TreatNonObjectAsNull":
                raise WebIDLError("TreatNonObjectAsNull cannot be specified on interfaces",
                                  [attr.location, self.location])
            elif identifier == "NoInterfaceObject":
                if not attr.noArguments():
                    raise WebIDLError("[NoInterfaceObject] must take no arguments",
                                      [attr.location])
                if self.ctor():
                    raise WebIDLError("Constructor and NoInterfaceObject are incompatible",
                                      [self.location])
                self._noInterfaceObject = True
            elif identifier == "Constructor" or identifier == "NamedConstructor" or identifier == "ChromeConstructor":
                if identifier == "Constructor" and not self.hasInterfaceObject():
                    raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                      [self.location])
                if identifier == "NamedConstructor" and not attr.hasValue():
                    raise WebIDLError("NamedConstructor must either take an identifier or take a named argument list",
                                      [attr.location])
                if identifier == "ChromeConstructor" and not self.hasInterfaceObject():
                    raise WebIDLError(str(identifier) + " and NoInterfaceObject are incompatible",
                                      [self.location])
                args = attr.args() if attr.hasArgs() else []
                # Promise's constructor is special-cased to return "any".
                if self.identifier.name == "Promise":
                    promiseType = BuiltinTypes[IDLBuiltinType.Types.any]
                else:
                    promiseType = None
                retType = IDLWrapperType(self.location, self, promiseType)
                if identifier == "Constructor" or identifier == "ChromeConstructor":
                    name = "constructor"
                    allowForbidden = True
                else:
                    name = attr.value()
                    allowForbidden = False
                methodIdentifier = IDLUnresolvedIdentifier(self.location, name,
                                                           allowForbidden=allowForbidden)
                # Constructors are modeled as static operations on the
                # interface itself.
                method = IDLMethod(self.location, methodIdentifier, retType,
                                   args, static=True)
                # Constructors are always NewObject and are always
                # assumed to be able to throw (since there's no way to
                # indicate otherwise) and never have any other
                # extended attributes.
                method.addExtendedAttributes(
                    [IDLExtendedAttribute(self.location, ("NewObject",)),
                     IDLExtendedAttribute(self.location, ("Throws",))])
                if identifier == "ChromeConstructor":
                    method.addExtendedAttributes(
                        [IDLExtendedAttribute(self.location, ("ChromeOnly",))])
                if identifier == "Constructor" or identifier == "ChromeConstructor":
                    method.resolve(self)
                else:
                    # We need to detect conflicts for NamedConstructors across
                    # interfaces. We first call resolve on the parentScope,
                    # which will merge all NamedConstructors with the same
                    # identifier accross interfaces as overloads.
                    method.resolve(self.parentScope)
                    # Then we look up the identifier on the parentScope. If the
                    # result is the same as the method we're adding then it
                    # hasn't been added as an overload and it's the first time
                    # we've encountered a NamedConstructor with that identifier.
                    # If the result is not the same as the method we're adding
                    # then it has been added as an overload and we need to check
                    # whether the result is actually one of our existing
                    # NamedConstructors.
                    newMethod = self.parentScope.lookupIdentifier(method.identifier)
                    if newMethod == method:
                        self.namedConstructors.append(method)
                    elif newMethod not in self.namedConstructors:
                        raise WebIDLError("NamedConstructor conflicts with a NamedConstructor of a different interface",
                                          [method.location, newMethod.location])
            elif (identifier == "ArrayClass"):
                if not attr.noArguments():
                    raise WebIDLError("[ArrayClass] must take no arguments",
                                      [attr.location])
                if self.parent:
                    raise WebIDLError("[ArrayClass] must not be specified on "
                                      "an interface with inherited interfaces",
                                      [attr.location, self.location])
            elif (identifier == "ExceptionClass"):
                if not attr.noArguments():
                    raise WebIDLError("[ExceptionClass] must take no arguments",
                                      [attr.location])
                if self.parent:
                    raise WebIDLError("[ExceptionClass] must not be specified on "
                                      "an interface with inherited interfaces",
                                      [attr.location, self.location])
            elif identifier == "Global":
                # [Global] / [Global=name] / [Global=(a,b)] — register this
                # interface's global name(s) with the parent scope.
                if attr.hasValue():
                    self.globalNames = [attr.value()]
                elif attr.hasArgs():
                    self.globalNames = attr.args()
                else:
                    self.globalNames = [self.identifier.name]
                self.parentScope.globalNames.update(self.globalNames)
                for globalName in self.globalNames:
                    self.parentScope.globalNameMapping[globalName].add(self.identifier.name)
                self._isOnGlobalProtoChain = True
            elif identifier == "PrimaryGlobal":
                if not attr.noArguments():
                    raise WebIDLError("[PrimaryGlobal] must take no arguments",
                                      [attr.location])
                # Only one interface per scope may be the primary global.
                if self.parentScope.primaryGlobalAttr is not None:
                    raise WebIDLError(
                        "[PrimaryGlobal] specified twice",
                        [attr.location,
                         self.parentScope.primaryGlobalAttr.location])
                self.parentScope.primaryGlobalAttr = attr
                self.parentScope.primaryGlobalName = self.identifier.name
                self.parentScope.globalNames.add(self.identifier.name)
                self.parentScope.globalNameMapping[self.identifier.name].add(self.identifier.name)
                self._isOnGlobalProtoChain = True
            elif (identifier == "NeedResolve" or
                  identifier == "OverrideBuiltins" or
                  identifier == "ChromeOnly" or
                  identifier == "Unforgeable" or
                  identifier == "UnsafeInPrerendering" or
                  identifier == "LegacyEventInit" or
                  identifier == "ProbablyShortLivingObject" or
                  identifier == "Abstract" or
                  identifier == "LegacyUnenumerableNamedProperties"):
                # Known extended attributes that do not take values
                if not attr.noArguments():
                    raise WebIDLError("[%s] must take no arguments" % identifier,
                                      [attr.location])
            elif identifier == "Exposed":
                convertExposedAttrToGlobalNameSet(attr,
                                                  self._exposureGlobalNames)
            elif (identifier == "Pref" or
                  identifier == "JSImplementation" or
                  identifier == "HeaderFile" or
                  identifier == "NavigatorProperty" or
                  identifier == "AvailableIn" or
                  identifier == "Func" or
                  identifier == "CheckAnyPermissions" or
                  identifier == "CheckAllPermissions" or
                  identifier == "Deprecated"):
                # Known extended attributes that take a string value
                if not attr.hasValue():
                    raise WebIDLError("[%s] must have a value" % identifier,
                                      [attr.location])
            else:
                raise WebIDLError("Unknown extended attribute %s on interface" % identifier,
                                  [attr.location])
            # Record the attribute: its value list, or True for bare flags.
            attrlist = attr.listValue()
            self._extendedAttrDict[identifier] = attrlist if len(attrlist) else True
    def addImplementedInterface(self, implementedInterface):
        # Record "this implements implementedInterface".
        assert(isinstance(implementedInterface, IDLInterface))
        self.implementedInterfaces.add(implementedInterface)
    def getInheritedInterfaces(self):
        """
        Returns a list of the interfaces this interface inherits from
        (not including this interface itself). The list is in order
        from most derived to least derived.
        """
        assert(self._finished)
        if not self.parent:
            return []
        parentInterfaces = self.parent.getInheritedInterfaces()
        parentInterfaces.insert(0, self.parent)
        return parentInterfaces
    def getConsequentialInterfaces(self):
        """Return the set of interfaces pulled in via "implements":
        directly implemented interfaces, their inherited interfaces, and
        (recursively) all of their consequential interfaces."""
        assert(self._finished)
        # The interfaces we implement directly
        consequentialInterfaces = set(self.implementedInterfaces)
        # And their inherited interfaces
        for iface in self.implementedInterfaces:
            consequentialInterfaces |= set(iface.getInheritedInterfaces())
        # And now collect up the consequential interfaces of all of those
        temp = set()
        for iface in consequentialInterfaces:
            temp |= iface.getConsequentialInterfaces()
        return consequentialInterfaces | temp
    def findInterfaceLoopPoint(self, otherInterface):
        """
        Finds an interface, amongst our ancestors and consequential interfaces,
        that inherits from otherInterface or implements otherInterface
        directly. If there is no such interface, returns None.
        """
        # Check the inheritance chain first...
        if self.parent:
            if self.parent == otherInterface:
                return self
            loopPoint = self.parent.findInterfaceLoopPoint(otherInterface)
            if loopPoint:
                return loopPoint
        # ...then the "implements" graph.
        if otherInterface in self.implementedInterfaces:
            return self
        for iface in self.implementedInterfaces:
            loopPoint = iface.findInterfaceLoopPoint(otherInterface)
            if loopPoint:
                return loopPoint
        return None
    def getExtendedAttribute(self, name):
        # Returns the recorded value list, True for bare flags, or None.
        return self._extendedAttrDict.get(name, None)
    def setNonPartial(self, location, parent, members):
        """Fill in the real (non-partial) definition for an interface that
        may previously have been seen only via partial declarations."""
        assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
        if self._isKnownNonPartial:
            raise WebIDLError("Two non-partial definitions for the "
                              "same interface",
                              [location, self.location])
        self._isKnownNonPartial = True
        # Now make it look like we were parsed at this new location, since
        # that's the place where the interface is "really" defined
        self.location = location
        assert not self.parent
        self.parent = parent
        # Put the new members at the beginning
        self.members = members + self.members
    def addPartialInterface(self, partial):
        # Track a "partial interface" block with the same name.
        assert self.identifier.name == partial.identifier.name
        self._partialInterfaces.append(partial)
    def getJSImplementation(self):
        # Returns the contract ID string from [JSImplementation="..."],
        # or a falsy value when the attribute is absent.
        classId = self.getExtendedAttribute("JSImplementation")
        if not classId:
            return classId
        assert isinstance(classId, list)
        assert len(classId) == 1
        return classId[0]
    def isJSImplemented(self):
        return bool(self.getJSImplementation())
    def isProbablyShortLivingObject(self):
        # [ProbablyShortLivingObject] is inherited down the parent chain.
        current = self
        while current:
            if current.getExtendedAttribute("ProbablyShortLivingObject"):
                return True
            current = current.parent
        return False
    def getNavigatorProperty(self):
        # Returns the property name from [NavigatorProperty="..."], or None.
        naviProp = self.getExtendedAttribute("NavigatorProperty")
        if not naviProp:
            return None
        assert len(naviProp) == 1
        assert isinstance(naviProp, list)
        assert len(naviProp[0]) != 0
        return naviProp[0]
    def hasChildInterfaces(self):
        return self._hasChildInterfaces
    def isOnGlobalProtoChain(self):
        return self._isOnGlobalProtoChain
    def _getDependentObjects(self):
        # Everything whose change should invalidate this interface:
        # members, implemented interfaces, and the parent (if any).
        deps = set(self.members)
        deps.update(self.implementedInterfaces)
        if self.parent:
            deps.add(self.parent)
        return deps
    def hasMembersInSlots(self):
        return self._ownMembersInSlots != 0
    def isExposedConditionally(self):
        # True when exposure of this interface is gated at runtime by any
        # of the conditional extended attributes.
        return (self.getExtendedAttribute("Pref") or
                self.getExtendedAttribute("ChromeOnly") or
                self.getExtendedAttribute("Func") or
                self.getExtendedAttribute("AvailableIn") or
                self.getExtendedAttribute("CheckAnyPermissions") or
                self.getExtendedAttribute("CheckAllPermissions"))
class IDLDictionary(IDLObjectWithScope):
    """A WebIDL dictionary definition: an ordered set of (optionally
    inherited) members, finished and validated after parsing."""
    def __init__(self, location, parentScope, name, parent, members):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)
        assert not parent or isinstance(parent, IDLIdentifierPlaceholder)
        self.parent = parent
        self._finished = False
        self.members = list(members)
        IDLObjectWithScope.__init__(self, location, parentScope, name)
    def __str__(self):
        return "Dictionary '%s'" % self.identifier.name
    def isDictionary(self):
        return True
    def canBeEmpty(self):
        """
        Returns true if this dictionary can be empty (that is, it has no
        required members and neither do any of its ancestors).
        """
        return (all(member.optional for member in self.members) and
                (not self.parent or self.parent.canBeEmpty()))
    def finish(self, scope):
        """Resolve the parent placeholder and all member types, then sort
        members and check for name collisions with inherited members."""
        if self._finished:
            return
        self._finished = True
        if self.parent:
            assert isinstance(self.parent, IDLIdentifierPlaceholder)
            oldParent = self.parent
            self.parent = self.parent.finish(scope)
            if not isinstance(self.parent, IDLDictionary):
                raise WebIDLError("Dictionary %s has parent that is not a dictionary" %
                                  self.identifier.name,
                                  [oldParent.location, self.parent.location])
            # Make sure the parent resolves all its members before we start
            # looking at them.
            self.parent.finish(scope)
        for member in self.members:
            member.resolve(self)
            if not member.isComplete():
                member.complete(scope)
                assert member.type.isComplete()
        # Members of a dictionary are sorted in lexicographic order
        # (the redundant cmp=cmp was dropped; the default key comparison
        # is identical and keeps this line Python-3-ready).
        self.members.sort(key=lambda x: x.identifier.name)
        inheritedMembers = []
        ancestor = self.parent
        while ancestor:
            if ancestor == self:
                raise WebIDLError("Dictionary %s has itself as an ancestor" %
                                  self.identifier.name,
                                  [self.identifier.location])
            inheritedMembers.extend(ancestor.members)
            ancestor = ancestor.parent
        # Catch name duplication
        for inheritedMember in inheritedMembers:
            for member in self.members:
                if member.identifier.name == inheritedMember.identifier.name:
                    raise WebIDLError("Dictionary %s has two members with name %s" %
                                      (self.identifier.name, member.identifier.name),
                                      [member.location, inheritedMember.location])
    def validate(self):
        """Reject nullable dictionary members and any member whose type
        (transitively) contains this dictionary itself."""
        def typeContainsDictionary(memberType, dictionary):
            """
            Returns a tuple whose:
                - First element is a Boolean value indicating whether
                  memberType contains dictionary.
                - Second element is:
                    A list of locations that leads from the type that was passed in
                    the memberType argument, to the dictionary being validated,
                    if the boolean value in the first element is True.
                    None, if the boolean value in the first element is False.
            """
            # Unwrap type wrappers that carry an inner type.
            if (memberType.nullable() or
                memberType.isArray() or
                memberType.isSequence() or
                memberType.isMozMap()):
                return typeContainsDictionary(memberType.inner, dictionary)
            if memberType.isDictionary():
                if memberType.inner == dictionary:
                    return (True, [memberType.location])
                (contains, locations) = dictionaryContainsDictionary(memberType.inner,
                                                                     dictionary)
                if contains:
                    return (True, [memberType.location] + locations)
            if memberType.isUnion():
                for member in memberType.flatMemberTypes:
                    (contains, locations) = typeContainsDictionary(member, dictionary)
                    if contains:
                        return (True, locations)
            return (False, None)
        def dictionaryContainsDictionary(dictMember, dictionary):
            # Search dictMember's members and ancestry for |dictionary|.
            for member in dictMember.members:
                (contains, locations) = typeContainsDictionary(member.type, dictionary)
                if contains:
                    return (True, [member.location] + locations)
            if dictMember.parent:
                if dictMember.parent == dictionary:
                    return (True, [dictMember.location])
                else:
                    (contains, locations) = dictionaryContainsDictionary(dictMember.parent, dictionary)
                    if contains:
                        return (True, [dictMember.location] + locations)
            return (False, None)
        for member in self.members:
            if member.type.isDictionary() and member.type.nullable():
                raise WebIDLError("Dictionary %s has member with nullable "
                                  "dictionary type" % self.identifier.name,
                                  [member.location])
            (contains, locations) = typeContainsDictionary(member.type, self)
            if contains:
                raise WebIDLError("Dictionary %s has member with itself as type." %
                                  self.identifier.name,
                                  [member.location] + locations)
    def addExtendedAttributes(self, attrs):
        # Dictionaries take no extended attributes.
        assert len(attrs) == 0
    def _getDependentObjects(self):
        deps = set(self.members)
        if (self.parent):
            deps.add(self.parent)
        return deps
class IDLEnum(IDLObjectWithIdentifier):
    """A WebIDL enum: a named, duplicate-free list of string values."""
    def __init__(self, location, parentScope, name, values):
        assert isinstance(parentScope, IDLScope)
        assert isinstance(name, IDLUnresolvedIdentifier)
        # Reject duplicates before registering the name in the scope.
        if len(values) != len(set(values)):
            raise WebIDLError("Enum %s has multiple identical strings" % name.name,
                              [location])
        IDLObjectWithIdentifier.__init__(self, location, parentScope, name)
        self._values = values
    def values(self):
        return self._values
    def finish(self, scope):
        # Nothing to resolve for enums.
        pass
    def validate(self):
        pass
    def isEnum(self):
        return True
    def addExtendedAttributes(self, attrs):
        # Enums take no extended attributes.
        assert len(attrs) == 0
    def _getDependentObjects(self):
        return set()
class IDLType(IDLObject):
    """Base class for all WebIDL types.

    Provides the full set of is*() predicates (all defaulting to False /
    delegating to tag()) that concrete subclasses override, plus the Tags
    enumeration used for dispatch in code generators.
    """
    Tags = enum(
        # The integer types
        'int8',
        'uint8',
        'int16',
        'uint16',
        'int32',
        'uint32',
        'int64',
        'uint64',
        # Additional primitive types
        'bool',
        'unrestricted_float',
        'float',
        'unrestricted_double',
        # "double" last primitive type to match IDLBuiltinType
        'double',
        # Other types
        'any',
        'domstring',
        'bytestring',
        'usvstring',
        'object',
        'date',
        'void',
        # Funny stuff
        'interface',
        'dictionary',
        'enum',
        'callback',
        'union',
        'sequence',
        'mozmap',
        'array'
    )
    def __init__(self, location, name):
        IDLObject.__init__(self, location)
        self.name = name
        # True for the built-in primitive/string/etc. types.
        self.builtin = False
    def __eq__(self, other):
        # NOTE: returns |other| itself (falsy) when other is None/falsy.
        return other and self.builtin == other.builtin and self.name == other.name
    def __ne__(self, other):
        return not self == other
    def __str__(self):
        return str(self.name)
    def isType(self):
        return True
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isBoolean(self):
        return False
    def isNumeric(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return self.name == "Void"
    def isSequence(self):
        return False
    def isMozMap(self):
        return False
    def isArray(self):
        return False
    def isArrayBuffer(self):
        return False
    def isArrayBufferView(self):
        return False
    def isSharedArrayBuffer(self):
        return False
    def isTypedArray(self):
        return False
    def isCallbackInterface(self):
        return False
    def isNonCallbackInterface(self):
        return False
    def isGeckoInterface(self):
        """ Returns a boolean indicating whether this type is an 'interface'
            type that is implemented in Gecko. At the moment, this returns
            true for all interface types that are not types from the TypedArray
            spec."""
        return self.isInterface() and not self.isSpiderMonkeyInterface()
    def isSpiderMonkeyInterface(self):
        """ Returns a boolean indicating whether this type is an 'interface'
            type that is implemented in Spidermonkey.  At the moment, this
            only returns true for the types from the TypedArray spec. """
        return self.isInterface() and (self.isArrayBuffer() or
                                       self.isArrayBufferView() or
                                       self.isSharedArrayBuffer() or
                                       self.isTypedArray())
    def isDictionary(self):
        return False
    def isInterface(self):
        return False
    def isAny(self):
        return self.tag() == IDLType.Tags.any
    def isDate(self):
        return self.tag() == IDLType.Tags.date
    def isObject(self):
        return self.tag() == IDLType.Tags.object
    def isPromise(self):
        return False
    def isComplete(self):
        return True
    def includesRestrictedFloat(self):
        return False
    def isFloat(self):
        return False
    def isUnrestricted(self):
        # Should only call this on float types
        # NOTE(review): base implementation asserts and implicitly returns
        # None; float subclasses are expected to override.
        assert self.isFloat()
    def isSerializable(self):
        return False
    def tag(self):
        assert False  # Override me!
    def treatNonCallableAsNull(self):
        # Only meaningful for nullable callback types.
        assert self.tag() == IDLType.Tags.callback
        return self.nullable() and self.inner.callback._treatNonCallableAsNull
    def treatNonObjectAsNull(self):
        assert self.tag() == IDLType.Tags.callback
        return self.nullable() and self.inner.callback._treatNonObjectAsNull
    def addExtendedAttributes(self, attrs):
        # Types take no extended attributes.
        assert len(attrs) == 0
    def resolveType(self, parentScope):
        pass
    def unroll(self):
        # Strip wrapper types (nullable/sequence/...); identity by default.
        return self
    def isDistinguishableFrom(self, other):
        raise TypeError("Can't tell whether a generic type is or is not "
                        "distinguishable from other things")
    def isExposedInAllOf(self, exposureSet):
        return True
class IDLUnresolvedType(IDLType):
    """
    Unresolved types are interface types: a placeholder holding a name that
    complete() later resolves against a scope into a typedef expansion, a
    callback type, or an IDLWrapperType.
    """
    def __init__(self, location, name, promiseInnerType=None):
        IDLType.__init__(self, location, name)
        self._promiseInnerType = promiseInnerType
    def isComplete(self):
        return False
    def complete(self, scope):
        """Resolve self.name in |scope| and return the concrete type.

        Raises WebIDLError if the name cannot be found.
        """
        obj = None
        try:
            obj = scope._lookupIdentifier(self.name)
        # Was a bare "except:": narrowed so KeyboardInterrupt/SystemExit
        # are no longer swallowed and re-reported as an unresolved type.
        except Exception:
            raise WebIDLError("Unresolved type '%s'." % self.name,
                              [self.location])
        assert obj
        if obj.isType():
            # Debug aid: dump the offending object before the assert fires.
            # (print() form is valid in both Python 2 and 3.)
            print(obj)
        assert not obj.isType()
        if obj.isTypedef():
            assert self.name.name == obj.identifier.name
            typedefType = IDLTypedefType(self.location, obj.innerType,
                                         obj.identifier)
            assert not typedefType.isComplete()
            # Recursively complete the typedef's inner type.
            return typedefType.complete(scope)
        elif obj.isCallback() and not obj.isInterface():
            assert self.name.name == obj.identifier.name
            return IDLCallbackType(self.location, obj)
        if self._promiseInnerType and not self._promiseInnerType.isComplete():
            self._promiseInnerType = self._promiseInnerType.complete(scope)
        name = self.name.resolve(scope, None)
        return IDLWrapperType(self.location, obj, self._promiseInnerType)
    def isDistinguishableFrom(self, other):
        raise TypeError("Can't tell whether an unresolved type is or is not "
                        "distinguishable from other things")
class IDLNullableType(IDLType):
    """A nullable wrapper ("T?") around an inner type; almost every
    predicate delegates to self.inner."""
    def __init__(self, location, innerType):
        assert not innerType.isVoid()
        assert not innerType == BuiltinTypes[IDLBuiltinType.Types.any]
        name = innerType.name
        # If the inner type is incomplete, complete() fixes up name later.
        if innerType.isComplete():
            name += "OrNull"
        IDLType.__init__(self, location, name)
        self.inner = innerType
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLNullableType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "OrNull"
    def nullable(self):
        return True
    def isCallback(self):
        return self.inner.isCallback()
    def isPrimitive(self):
        return self.inner.isPrimitive()
    def isBoolean(self):
        return self.inner.isBoolean()
    def isNumeric(self):
        return self.inner.isNumeric()
    def isString(self):
        return self.inner.isString()
    def isByteString(self):
        return self.inner.isByteString()
    def isDOMString(self):
        return self.inner.isDOMString()
    def isUSVString(self):
        return self.inner.isUSVString()
    def isFloat(self):
        return self.inner.isFloat()
    def isUnrestricted(self):
        return self.inner.isUnrestricted()
    def includesRestrictedFloat(self):
        return self.inner.includesRestrictedFloat()
    def isInteger(self):
        return self.inner.isInteger()
    def isVoid(self):
        return False
    def isSequence(self):
        return self.inner.isSequence()
    def isMozMap(self):
        return self.inner.isMozMap()
    def isArray(self):
        return self.inner.isArray()
    def isArrayBuffer(self):
        return self.inner.isArrayBuffer()
    def isArrayBufferView(self):
        return self.inner.isArrayBufferView()
    def isSharedArrayBuffer(self):
        return self.inner.isSharedArrayBuffer()
    def isTypedArray(self):
        return self.inner.isTypedArray()
    def isDictionary(self):
        return self.inner.isDictionary()
    def isInterface(self):
        return self.inner.isInterface()
    def isPromise(self):
        return self.inner.isPromise()
    def isCallbackInterface(self):
        return self.inner.isCallbackInterface()
    def isNonCallbackInterface(self):
        return self.inner.isNonCallbackInterface()
    def isEnum(self):
        return self.inner.isEnum()
    def isUnion(self):
        return self.inner.isUnion()
    def isSerializable(self):
        return self.inner.isSerializable()
    def tag(self):
        return self.inner.tag()
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def complete(self, scope):
        """Complete the inner type and enforce that nullable types do not
        nest (directly or via a union member)."""
        self.inner = self.inner.complete(scope)
        if self.inner.nullable():
            raise WebIDLError("The inner type of a nullable type must not be "
                              "a nullable type",
                              [self.location, self.inner.location])
        if self.inner.isUnion():
            if self.inner.hasNullableType:
                raise WebIDLError("The inner type of a nullable type must not "
                                  "be a union type that itself has a nullable "
                                  "type as a member type", [self.location])
        self.name = self.inner.name + "OrNull"
        return self
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        if (other.nullable() or (other.isUnion() and other.hasNullableType) or
            other.isDictionary()):
            # Can't tell which type null should become
            return False
        return self.inner.isDistinguishableFrom(other)
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLSequenceType(IDLType):
    """A "sequence<T>" type wrapping a parameter type."""
    def __init__(self, location, parameterType):
        assert not parameterType.isVoid()
        IDLType.__init__(self, location, parameterType.name)
        self.inner = parameterType
        self.builtin = False
        # Need to set self.name up front if our inner type is already complete,
        # since in that case our .complete() won't be called.
        if self.inner.isComplete():
            self.name = self.inner.name + "Sequence"
    def __eq__(self, other):
        return isinstance(other, IDLSequenceType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "Sequence"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        return True
    def isArray(self):
        return False
    def isDictionary(self):
        return False
    def isInterface(self):
        return False
    def isEnum(self):
        return False
    def isSerializable(self):
        return self.inner.isSerializable()
    def includesRestrictedFloat(self):
        return self.inner.includesRestrictedFloat()
    def tag(self):
        return IDLType.Tags.sequence
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def complete(self, scope):
        self.inner = self.inner.complete(scope)
        self.name = self.inner.name + "Sequence"
        return self
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isInterface() or
                other.isDictionary() or
                other.isCallback() or other.isMozMap())
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLMozMapType(IDLType):
    """A "MozMap<T>" type (string-keyed map) wrapping a value type."""
    # XXXbz This is pretty similar to IDLSequenceType in various ways.
    # And maybe to IDLNullableType.  Should we have a superclass for
    # "type containing this other type"?  Bug 1015318.
    def __init__(self, location, parameterType):
        assert not parameterType.isVoid()
        IDLType.__init__(self, location, parameterType.name)
        self.inner = parameterType
        self.builtin = False
        # Need to set self.name up front if our inner type is already complete,
        # since in that case our .complete() won't be called.
        if self.inner.isComplete():
            self.name = self.inner.name + "MozMap"
    def __eq__(self, other):
        return isinstance(other, IDLMozMapType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "MozMap"
    def isMozMap(self):
        return True
    def includesRestrictedFloat(self):
        return self.inner.includesRestrictedFloat()
    def tag(self):
        return IDLType.Tags.mozmap
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def complete(self, scope):
        self.inner = self.inner.complete(scope)
        self.name = self.inner.name + "MozMap"
        return self
    def unroll(self):
        # We do not unroll our inner.  Just stop at ourselves.  That
        # lets us add headers for both ourselves and our inner as
        # needed.
        return self
    def isDistinguishableFrom(self, other):
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isNonCallbackInterface() or other.isSequence())
    def isExposedInAllOf(self, exposureSet):
        return self.inner.unroll().isExposedInAllOf(exposureSet)
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLUnionType(IDLType):
    """A WebIDL union type "(A or B or ...)".

    complete() flattens nested unions and nullable members into
    flatMemberTypes, enforcing at most one nullable member, no
    nullable+dictionary mix, and pairwise distinguishability.
    """
    def __init__(self, location, memberTypes):
        IDLType.__init__(self, location, "")
        self.memberTypes = memberTypes
        self.hasNullableType = False
        self._dictionaryType = None
        # Set (non-None) by complete(); doubles as the "complete" flag.
        self.flatMemberTypes = None
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLUnionType) and self.memberTypes == other.memberTypes
    def __hash__(self):
        assert self.isComplete()
        return self.name.__hash__()
    def isVoid(self):
        return False
    def isUnion(self):
        return True
    def isSerializable(self):
        return all(m.isSerializable() for m in self.memberTypes)
    def includesRestrictedFloat(self):
        return any(t.includesRestrictedFloat() for t in self.memberTypes)
    def tag(self):
        return IDLType.Tags.union
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        for t in self.memberTypes:
            t.resolveType(parentScope)
    def isComplete(self):
        return self.flatMemberTypes is not None
    def complete(self, scope):
        def typeName(type):
            if isinstance(type, IDLNullableType):
                return typeName(type.inner) + "OrNull"
            if isinstance(type, IDLWrapperType):
                return typeName(type._identifier.object())
            if isinstance(type, IDLObjectWithIdentifier):
                return typeName(type.identifier)
            return type.name
        for (i, type) in enumerate(self.memberTypes):
            if not type.isComplete():
                self.memberTypes[i] = type.complete(scope)
        self.name = "Or".join(typeName(type) for type in self.memberTypes)
        self.flatMemberTypes = list(self.memberTypes)
        i = 0
        while i < len(self.flatMemberTypes):
            if self.flatMemberTypes[i].nullable():
                if self.hasNullableType:
                    raise WebIDLError("Can't have more than one nullable types in a union",
                                      [nullableType.location, self.flatMemberTypes[i].location])
                if self.hasDictionaryType():
                    raise WebIDLError("Can't have a nullable type and a "
                                      "dictionary type in a union",
                                      [self._dictionaryType.location,
                                       self.flatMemberTypes[i].location])
                self.hasNullableType = True
                nullableType = self.flatMemberTypes[i]
                # Unwrap in place, then re-examine the unwrapped type
                # (hence no i += 1 here).
                self.flatMemberTypes[i] = self.flatMemberTypes[i].inner
                continue
            if self.flatMemberTypes[i].isDictionary():
                if self.hasNullableType:
                    raise WebIDLError("Can't have a nullable type and a "
                                      "dictionary type in a union",
                                      [nullableType.location,
                                       self.flatMemberTypes[i].location])
                self._dictionaryType = self.flatMemberTypes[i]
            elif self.flatMemberTypes[i].isUnion():
                # Splice a nested union's members in place of the union
                # and re-examine from the same index.
                self.flatMemberTypes[i:i + 1] = self.flatMemberTypes[i].memberTypes
                continue
            i += 1
        # Every pair of flattened members must be distinguishable.
        for (i, t) in enumerate(self.flatMemberTypes[:-1]):
            for u in self.flatMemberTypes[i + 1:]:
                if not t.isDistinguishableFrom(u):
                    raise WebIDLError("Flat member types of a union should be "
                                      "distinguishable, " + str(t) + " is not "
                                      "distinguishable from " + str(u),
                                      [self.location, t.location, u.location])
        return self
    def isDistinguishableFrom(self, other):
        if self.hasNullableType and other.nullable():
            # Can't tell which type null should become
            return False
        if other.isUnion():
            otherTypes = other.unroll().memberTypes
        else:
            otherTypes = [other]
        # For every type in otherTypes, check that it's distinguishable from
        # every type in our types
        for u in otherTypes:
            if any(not t.isDistinguishableFrom(u) for t in self.memberTypes):
                return False
        return True
    def isExposedInAllOf(self, exposureSet):
        # We could have different member types in different globals.  Just make sure that each thing in exposureSet has one of our member types exposed in it.
        for globalName in exposureSet:
            if not any(t.unroll().isExposedInAllOf(set([globalName])) for t
                       in self.flatMemberTypes):
                return False
        return True
    def hasDictionaryType(self):
        return self._dictionaryType is not None
    def hasPossiblyEmptyDictionaryType(self):
        return (self._dictionaryType is not None and
                self._dictionaryType.inner.canBeEmpty())
    def _getDependentObjects(self):
        return set(self.memberTypes)
class IDLArrayType(IDLType):
    """A legacy WebIDL "T[]" array type; may not wrap sequence, MozMap,
    or dictionary element types."""
    def __init__(self, location, parameterType):
        assert not parameterType.isVoid()
        if parameterType.isSequence():
            raise WebIDLError("Array type cannot parameterize over a sequence type",
                              [location])
        if parameterType.isMozMap():
            raise WebIDLError("Array type cannot parameterize over a MozMap type",
                              [location])
        if parameterType.isDictionary():
            raise WebIDLError("Array type cannot parameterize over a dictionary type",
                              [location])
        IDLType.__init__(self, location, parameterType.name)
        self.inner = parameterType
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLArrayType) and self.inner == other.inner
    def __str__(self):
        return self.inner.__str__() + "Array"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        assert not self.inner.isSequence()
        return False
    def isArray(self):
        return True
    def isDictionary(self):
        assert not self.inner.isDictionary()
        return False
    def isInterface(self):
        return False
    def isEnum(self):
        return False
    def tag(self):
        return IDLType.Tags.array
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolveType(parentScope)
    def isComplete(self):
        return self.inner.isComplete()
    def complete(self, scope):
        self.inner = self.inner.complete(scope)
        self.name = self.inner.name
        # Re-check: the element type may only resolve to a dictionary now.
        if self.inner.isDictionary():
            raise WebIDLError("Array type must not contain "
                              "dictionary as element type.",
                              [self.inner.location])
        assert not self.inner.isSequence()
        return self
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isDate() or other.isNonCallbackInterface())
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLTypedefType(IDLType):
    """
    A use of a typedef'd name as a type.  Nearly every predicate delegates
    to the underlying type.  Instances never survive completion: complete()
    returns the (completed) inner type, replacing this wrapper entirely.
    """
    def __init__(self, location, innerType, name):
        IDLType.__init__(self, location, name)
        self.inner = innerType
        self.builtin = False
    def __eq__(self, other):
        return isinstance(other, IDLTypedefType) and self.inner == other.inner
    def __str__(self):
        return self.name
    def nullable(self):
        return self.inner.nullable()
    def isPrimitive(self):
        return self.inner.isPrimitive()
    def isBoolean(self):
        return self.inner.isBoolean()
    def isNumeric(self):
        return self.inner.isNumeric()
    def isString(self):
        return self.inner.isString()
    def isByteString(self):
        return self.inner.isByteString()
    def isDOMString(self):
        return self.inner.isDOMString()
    def isUSVString(self):
        return self.inner.isUSVString()
    def isVoid(self):
        return self.inner.isVoid()
    def isSequence(self):
        return self.inner.isSequence()
    def isMozMap(self):
        return self.inner.isMozMap()
    def isArray(self):
        return self.inner.isArray()
    def isDictionary(self):
        return self.inner.isDictionary()
    def isArrayBuffer(self):
        return self.inner.isArrayBuffer()
    def isArrayBufferView(self):
        return self.inner.isArrayBufferView()
    def isSharedArrayBuffer(self):
        return self.inner.isSharedArrayBuffer()
    def isTypedArray(self):
        return self.inner.isTypedArray()
    def isInterface(self):
        return self.inner.isInterface()
    def isCallbackInterface(self):
        return self.inner.isCallbackInterface()
    def isNonCallbackInterface(self):
        return self.inner.isNonCallbackInterface()
    def isComplete(self):
        # Deliberately always False, so callers are forced through
        # complete(), which resolves away the typedef wrapper.
        return False
    def complete(self, parentScope):
        if not self.inner.isComplete():
            self.inner = self.inner.complete(parentScope)
        assert self.inner.isComplete()
        # Return the inner type, not self: the typedef disappears here.
        return self.inner
    # Do we need a resolveType impl?  I don't think it's particularly useful....
    def tag(self):
        return self.inner.tag()
    def unroll(self):
        return self.inner.unroll()
    def isDistinguishableFrom(self, other):
        return self.inner.isDistinguishableFrom(other)
    def _getDependentObjects(self):
        return self.inner._getDependentObjects()
class IDLTypedef(IDLObjectWithIdentifier):
    """
    The declaration side of a typedef: binds a name in parentScope to an
    underlying type.  Uses of the name are represented by IDLTypedefType.
    """
    def __init__(self, location, parentScope, innerType, name):
        typedefIdentifier = IDLUnresolvedIdentifier(location, name)
        IDLObjectWithIdentifier.__init__(self, location, parentScope,
                                         typedefIdentifier)
        self.innerType = innerType
    def __str__(self):
        return "Typedef %s %s" % (self.identifier.name, self.innerType)
    def finish(self, parentScope):
        # Complete the underlying type lazily, exactly once.
        if self.innerType.isComplete():
            return
        self.innerType = self.innerType.complete(parentScope)
    def validate(self):
        pass
    def isTypedef(self):
        return True
    def addExtendedAttributes(self, attrs):
        # Extended attributes are not supported on typedefs.
        assert len(attrs) == 0
    def _getDependentObjects(self):
        return self.innerType._getDependentObjects()
class IDLWrapperType(IDLType):
    """
    A type that wraps a scoped declaration: an interface (internal or
    external), a dictionary, or an enum.  For Promise, promiseInnerType
    carries the parameterization.
    """
    def __init__(self, location, inner, promiseInnerType=None):
        IDLType.__init__(self, location, inner.identifier.name)
        self.inner = inner
        self._identifier = inner.identifier
        self.builtin = False
        # Only Promise may carry an inner (resolution) type.
        assert not promiseInnerType or inner.identifier.name == "Promise"
        self._promiseInnerType = promiseInnerType
    def __eq__(self, other):
        # Identity of the wrapped declaration's identifier decides equality;
        # note the Promise inner type is deliberately not compared here.
        return (isinstance(other, IDLWrapperType) and
                self._identifier == other._identifier and
                self.builtin == other.builtin)
    def __str__(self):
        return str(self.name) + " (Wrapper)"
    def nullable(self):
        return False
    def isPrimitive(self):
        return False
    def isString(self):
        return False
    def isByteString(self):
        return False
    def isDOMString(self):
        return False
    def isUSVString(self):
        return False
    def isVoid(self):
        return False
    def isSequence(self):
        return False
    def isArray(self):
        return False
    def isDictionary(self):
        return isinstance(self.inner, IDLDictionary)
    def isInterface(self):
        return (isinstance(self.inner, IDLInterface) or
                isinstance(self.inner, IDLExternalInterface))
    def isCallbackInterface(self):
        return self.isInterface() and self.inner.isCallback()
    def isNonCallbackInterface(self):
        return self.isInterface() and not self.inner.isCallback()
    def isEnum(self):
        return isinstance(self.inner, IDLEnum)
    def isPromise(self):
        # Promise is recognized by name, not by a dedicated class.
        return (isinstance(self.inner, IDLInterface) and
                self.inner.identifier.name == "Promise")
    def promiseInnerType(self):
        assert self.isPromise()
        return self._promiseInnerType
    def isSerializable(self):
        # Interfaces are serializable iff they have a jsonifier; enums
        # always are; dictionaries are iff all their member types are.
        if self.isInterface():
            if self.inner.isExternal():
                return False
            return any(m.isMethod() and m.isJsonifier() for m in self.inner.members)
        elif self.isEnum():
            return True
        elif self.isDictionary():
            return all(m.type.isSerializable() for m in self.inner.members)
        else:
            raise WebIDLError("IDLWrapperType wraps type %s that we don't know if "
                              "is serializable" % type(self.inner), [self.location])
    def resolveType(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.inner.resolve(parentScope)
    def isComplete(self):
        return True
    def tag(self):
        if self.isInterface():
            return IDLType.Tags.interface
        elif self.isEnum():
            return IDLType.Tags.enum
        elif self.isDictionary():
            return IDLType.Tags.dictionary
        else:
            assert False
    def isDistinguishableFrom(self, other):
        # Implements the WebIDL "distinguishable types" relation for
        # interface/enum/dictionary wrappers (used for overload resolution).
        if self.isPromise():
            return False
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        assert self.isInterface() or self.isEnum() or self.isDictionary()
        if self.isEnum():
            return (other.isPrimitive() or other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isMozMap() or other.isArray() or
                    other.isDate())
        if self.isDictionary() and other.nullable():
            return False
        if (other.isPrimitive() or other.isString() or other.isEnum() or
            other.isDate() or other.isSequence()):
            return True
        if self.isDictionary():
            return other.isNonCallbackInterface()
        assert self.isInterface()
        if other.isInterface():
            if other.isSpiderMonkeyInterface():
                # Just let |other| handle things
                return other.isDistinguishableFrom(self)
            assert self.isGeckoInterface() and other.isGeckoInterface()
            if self.inner.isExternal() or other.unroll().inner.isExternal():
                return self != other
            # Two internal interfaces are distinguishable if no interface
            # can implement both, and at least one is non-callback.
            return (len(self.inner.interfacesBasedOnSelf &
                        other.unroll().inner.interfacesBasedOnSelf) == 0 and
                    (self.isNonCallbackInterface() or
                     other.isNonCallbackInterface()))
        if (other.isDictionary() or other.isCallback() or
            other.isMozMap() or other.isArray()):
            return self.isNonCallbackInterface()
        # Not much else |other| can be
        assert other.isObject()
        return False
    def isExposedInAllOf(self, exposureSet):
        if not self.isInterface():
            return True
        iface = self.inner
        if iface.isExternal():
            # Let's say true, though ideally we'd only do this when
            # exposureSet contains the primary global's name.
            return True
        if (self.isPromise() and
            # Check the internal type
            not self.promiseInnerType().unroll().isExposedInAllOf(exposureSet)):
            return False
        return iface.exposureSet.issuperset(exposureSet)
    def _getDependentObjects(self):
        # NB: The codegen for an interface type depends on
        #  a) That the identifier is in fact an interface (as opposed to
        #     a dictionary or something else).
        #  b) The native type of the interface.
        #  If we depend on the interface object we will also depend on
        #  anything the interface depends on which is undesirable.  We
        #  considered implementing a dependency just on the interface type
        #  file, but then every modification to an interface would cause this
        #  to be regenerated which is still undesirable.  We decided not to
        #  depend on anything, reasoning that:
        #  1) Changing the concrete type of the interface requires modifying
        #     Bindings.conf, which is still a global dependency.
        #  2) Changing an interface to a dictionary (or vice versa) with the
        #     same identifier should be incredibly rare.
        #
        #  On the other hand, if our type is a dictionary, we should
        #  depend on it, because the member types of a dictionary
        #  affect whether a method taking the dictionary as an argument
        #  takes a JSContext* argument or not.
        if self.isDictionary():
            return set([self.inner])
        return set()
class IDLBuiltinType(IDLType):
    """
    A builtin WebIDL type: primitives, strings, any/object/date/void, and
    the SpiderMonkey-implemented buffer/typed-array interfaces.

    The ordering of the Types enum is load-bearing: several predicates
    below test tag ranges (e.g. isPrimitive, isInteger, isTypedArray).
    """
    Types = enum(
        # The integer types
        'byte',
        'octet',
        'short',
        'unsigned_short',
        'long',
        'unsigned_long',
        'long_long',
        'unsigned_long_long',
        # Additional primitive types
        'boolean',
        'unrestricted_float',
        'float',
        'unrestricted_double',
        # IMPORTANT: "double" must be the last primitive type listed
        'double',
        # Other types
        'any',
        'domstring',
        'bytestring',
        'usvstring',
        'object',
        'date',
        'void',
        # Funny stuff
        'ArrayBuffer',
        'ArrayBufferView',
        'SharedArrayBuffer',
        'Int8Array',
        'Uint8Array',
        'Uint8ClampedArray',
        'Int16Array',
        'Uint16Array',
        'Int32Array',
        'Uint32Array',
        'Float32Array',
        'Float64Array'
        )

    # Maps each builtin Types tag to the IDLType.Tags value used by codegen.
    TagLookup = {
            Types.byte: IDLType.Tags.int8,
            Types.octet: IDLType.Tags.uint8,
            Types.short: IDLType.Tags.int16,
            Types.unsigned_short: IDLType.Tags.uint16,
            Types.long: IDLType.Tags.int32,
            Types.unsigned_long: IDLType.Tags.uint32,
            Types.long_long: IDLType.Tags.int64,
            Types.unsigned_long_long: IDLType.Tags.uint64,
            Types.boolean: IDLType.Tags.bool,
            Types.unrestricted_float: IDLType.Tags.unrestricted_float,
            Types.float: IDLType.Tags.float,
            Types.unrestricted_double: IDLType.Tags.unrestricted_double,
            Types.double: IDLType.Tags.double,
            Types.any: IDLType.Tags.any,
            Types.domstring: IDLType.Tags.domstring,
            Types.bytestring: IDLType.Tags.bytestring,
            Types.usvstring: IDLType.Tags.usvstring,
            Types.object: IDLType.Tags.object,
            Types.date: IDLType.Tags.date,
            Types.void: IDLType.Tags.void,
            Types.ArrayBuffer: IDLType.Tags.interface,
            Types.ArrayBufferView: IDLType.Tags.interface,
            Types.SharedArrayBuffer: IDLType.Tags.interface,
            Types.Int8Array: IDLType.Tags.interface,
            Types.Uint8Array: IDLType.Tags.interface,
            Types.Uint8ClampedArray: IDLType.Tags.interface,
            Types.Int16Array: IDLType.Tags.interface,
            Types.Uint16Array: IDLType.Tags.interface,
            Types.Int32Array: IDLType.Tags.interface,
            Types.Uint32Array: IDLType.Tags.interface,
            Types.Float32Array: IDLType.Tags.interface,
            Types.Float64Array: IDLType.Tags.interface
        }

    def __init__(self, location, name, type):
        IDLType.__init__(self, location, name)
        self.builtin = True
        self._typeTag = type

    def isPrimitive(self):
        # Relies on "double" being the last primitive in the Types enum.
        return self._typeTag <= IDLBuiltinType.Types.double

    def isBoolean(self):
        return self._typeTag == IDLBuiltinType.Types.boolean

    def isNumeric(self):
        return self.isPrimitive() and not self.isBoolean()

    def isString(self):
        return (self._typeTag == IDLBuiltinType.Types.domstring or
                self._typeTag == IDLBuiltinType.Types.bytestring or
                self._typeTag == IDLBuiltinType.Types.usvstring)

    def isByteString(self):
        return self._typeTag == IDLBuiltinType.Types.bytestring

    def isDOMString(self):
        return self._typeTag == IDLBuiltinType.Types.domstring

    def isUSVString(self):
        return self._typeTag == IDLBuiltinType.Types.usvstring

    def isInteger(self):
        # Integer tags come first in the Types enum, ending at
        # unsigned_long_long.
        return self._typeTag <= IDLBuiltinType.Types.unsigned_long_long

    def isArrayBuffer(self):
        return self._typeTag == IDLBuiltinType.Types.ArrayBuffer

    def isArrayBufferView(self):
        return self._typeTag == IDLBuiltinType.Types.ArrayBufferView

    def isSharedArrayBuffer(self):
        return self._typeTag == IDLBuiltinType.Types.SharedArrayBuffer

    def isTypedArray(self):
        # The typed-array tags form a contiguous range in the Types enum.
        return (self._typeTag >= IDLBuiltinType.Types.Int8Array and
                self._typeTag <= IDLBuiltinType.Types.Float64Array)

    def isInterface(self):
        # TypedArray things are interface types per the TypedArray spec,
        # but we handle them as builtins because SpiderMonkey implements
        # all of it internally.
        return (self.isArrayBuffer() or
                self.isArrayBufferView() or
                self.isSharedArrayBuffer() or
                self.isTypedArray())

    def isNonCallbackInterface(self):
        # All the interfaces we can be are non-callback
        return self.isInterface()

    def isFloat(self):
        return (self._typeTag == IDLBuiltinType.Types.float or
                self._typeTag == IDLBuiltinType.Types.double or
                self._typeTag == IDLBuiltinType.Types.unrestricted_float or
                self._typeTag == IDLBuiltinType.Types.unrestricted_double)

    def isUnrestricted(self):
        # Only meaningful for float types.
        assert self.isFloat()
        return (self._typeTag == IDLBuiltinType.Types.unrestricted_float or
                self._typeTag == IDLBuiltinType.Types.unrestricted_double)

    def isSerializable(self):
        return self.isPrimitive() or self.isString() or self.isDate()

    def includesRestrictedFloat(self):
        return self.isFloat() and not self.isUnrestricted()

    def tag(self):
        return IDLBuiltinType.TagLookup[self._typeTag]

    def isDistinguishableFrom(self, other):
        # Implements the WebIDL "distinguishable types" relation for
        # builtins (used for overload resolution).
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        if self.isBoolean():
            return (other.isNumeric() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isMozMap() or other.isArray() or
                    other.isDate())
        if self.isNumeric():
            return (other.isBoolean() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isMozMap() or other.isArray() or
                    other.isDate())
        if self.isString():
            return (other.isPrimitive() or other.isInterface() or
                    other.isObject() or
                    other.isCallback() or other.isDictionary() or
                    other.isSequence() or other.isMozMap() or other.isArray() or
                    other.isDate())
        if self.isAny():
            # Can't tell "any" apart from anything
            return False
        if self.isObject():
            return other.isPrimitive() or other.isString() or other.isEnum()
        if self.isDate():
            return (other.isPrimitive() or other.isString() or other.isEnum() or
                    other.isInterface() or other.isCallback() or
                    other.isDictionary() or other.isSequence() or
                    other.isMozMap() or other.isArray())
        if self.isVoid():
            return not other.isVoid()
        # Not much else we could be!
        assert self.isSpiderMonkeyInterface()
        # Like interfaces, but we know we're not a callback
        return (other.isPrimitive() or other.isString() or other.isEnum() or
                other.isCallback() or other.isDictionary() or
                other.isSequence() or other.isMozMap() or other.isArray() or
                other.isDate() or
                (other.isInterface() and (
                 # ArrayBuffer is distinguishable from everything
                 # that's not an ArrayBuffer or a callback interface
                 (self.isArrayBuffer() and not other.isArrayBuffer()) or
                 (self.isSharedArrayBuffer() and not other.isSharedArrayBuffer()) or
                 # ArrayBufferView is distinguishable from everything
                 # that's not an ArrayBufferView or typed array.
                 (self.isArrayBufferView() and not other.isArrayBufferView() and
                  not other.isTypedArray()) or
                 # Typed arrays are distinguishable from everything
                 # except ArrayBufferView and the same type of typed
                 # array
                 (self.isTypedArray() and not other.isArrayBufferView() and not
                  (other.isTypedArray() and other.name == self.name)))))

    def _getDependentObjects(self):
        return set()
# Singleton IDLBuiltinType instances, one per IDLBuiltinType.Types tag.
# Code elsewhere looks types up here rather than constructing new ones.
BuiltinTypes = {
      IDLBuiltinType.Types.byte:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Byte",
                         IDLBuiltinType.Types.byte),
      IDLBuiltinType.Types.octet:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Octet",
                         IDLBuiltinType.Types.octet),
      IDLBuiltinType.Types.short:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Short",
                         IDLBuiltinType.Types.short),
      IDLBuiltinType.Types.unsigned_short:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedShort",
                         IDLBuiltinType.Types.unsigned_short),
      IDLBuiltinType.Types.long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Long",
                         IDLBuiltinType.Types.long),
      IDLBuiltinType.Types.unsigned_long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLong",
                         IDLBuiltinType.Types.unsigned_long),
      IDLBuiltinType.Types.long_long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "LongLong",
                         IDLBuiltinType.Types.long_long),
      IDLBuiltinType.Types.unsigned_long_long:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnsignedLongLong",
                         IDLBuiltinType.Types.unsigned_long_long),
      IDLBuiltinType.Types.boolean:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Boolean",
                         IDLBuiltinType.Types.boolean),
      IDLBuiltinType.Types.float:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float",
                         IDLBuiltinType.Types.float),
      IDLBuiltinType.Types.unrestricted_float:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedFloat",
                         IDLBuiltinType.Types.unrestricted_float),
      IDLBuiltinType.Types.double:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Double",
                         IDLBuiltinType.Types.double),
      IDLBuiltinType.Types.unrestricted_double:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "UnrestrictedDouble",
                         IDLBuiltinType.Types.unrestricted_double),
      IDLBuiltinType.Types.any:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Any",
                         IDLBuiltinType.Types.any),
      IDLBuiltinType.Types.domstring:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "String",
                         IDLBuiltinType.Types.domstring),
      IDLBuiltinType.Types.bytestring:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ByteString",
                         IDLBuiltinType.Types.bytestring),
      IDLBuiltinType.Types.usvstring:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "USVString",
                         IDLBuiltinType.Types.usvstring),
      IDLBuiltinType.Types.object:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Object",
                         IDLBuiltinType.Types.object),
      IDLBuiltinType.Types.date:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Date",
                         IDLBuiltinType.Types.date),
      IDLBuiltinType.Types.void:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Void",
                         IDLBuiltinType.Types.void),
      IDLBuiltinType.Types.ArrayBuffer:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBuffer",
                         IDLBuiltinType.Types.ArrayBuffer),
      IDLBuiltinType.Types.ArrayBufferView:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "ArrayBufferView",
                         IDLBuiltinType.Types.ArrayBufferView),
      IDLBuiltinType.Types.SharedArrayBuffer:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "SharedArrayBuffer",
                         IDLBuiltinType.Types.SharedArrayBuffer),
      IDLBuiltinType.Types.Int8Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int8Array",
                         IDLBuiltinType.Types.Int8Array),
      IDLBuiltinType.Types.Uint8Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8Array",
                         IDLBuiltinType.Types.Uint8Array),
      IDLBuiltinType.Types.Uint8ClampedArray:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint8ClampedArray",
                         IDLBuiltinType.Types.Uint8ClampedArray),
      IDLBuiltinType.Types.Int16Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int16Array",
                         IDLBuiltinType.Types.Int16Array),
      IDLBuiltinType.Types.Uint16Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint16Array",
                         IDLBuiltinType.Types.Uint16Array),
      IDLBuiltinType.Types.Int32Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Int32Array",
                         IDLBuiltinType.Types.Int32Array),
      IDLBuiltinType.Types.Uint32Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Uint32Array",
                         IDLBuiltinType.Types.Uint32Array),
      IDLBuiltinType.Types.Float32Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float32Array",
                         IDLBuiltinType.Types.Float32Array),
      IDLBuiltinType.Types.Float64Array:
          IDLBuiltinType(BuiltinLocation("<builtin type>"), "Float64Array",
                         IDLBuiltinType.Types.Float64Array)
    }
# Inclusive (min, max) value ranges for each builtin integer type.
integerTypeSizes = {
        IDLBuiltinType.Types.byte: (-128, 127),
        IDLBuiltinType.Types.octet:  (0, 255),
        IDLBuiltinType.Types.short: (-32768, 32767),
        IDLBuiltinType.Types.unsigned_short: (0, 65535),
        IDLBuiltinType.Types.long: (-2147483648, 2147483647),
        IDLBuiltinType.Types.unsigned_long: (0, 4294967295),
        IDLBuiltinType.Types.long_long: (-9223372036854775808, 9223372036854775807),
        IDLBuiltinType.Types.unsigned_long_long: (0, 18446744073709551615)
    }
def matchIntegerValueToType(value):
    """
    Return the IDLBuiltinType singleton for the narrowest builtin integer
    type whose range includes |value|, or None if no integer type fits.
    """
    # Check types in a fixed narrowest-first order.  The previous
    # implementation iterated integerTypeSizes (a plain dict), whose
    # iteration order is arbitrary in Python 2, so a value fitting several
    # ranges (e.g. 5) could be matched to any of them nondeterministically.
    checkOrder = [
        IDLBuiltinType.Types.byte,
        IDLBuiltinType.Types.octet,
        IDLBuiltinType.Types.short,
        IDLBuiltinType.Types.unsigned_short,
        IDLBuiltinType.Types.long,
        IDLBuiltinType.Types.unsigned_long,
        IDLBuiltinType.Types.long_long,
        IDLBuiltinType.Types.unsigned_long_long,
    ]
    for typeTag in checkOrder:
        (minValue, maxValue) = integerTypeSizes[typeTag]
        if minValue <= value <= maxValue:
            return BuiltinTypes[typeTag]
    return None
class IDLValue(IDLObject):
    """
    A literal value (e.g. a default value or constant) tagged with the
    IDLType it was written as.
    """
    def __init__(self, location, type, value):
        IDLObject.__init__(self, location)
        self.type = type
        assert isinstance(type, IDLType)
        self.value = value
    def coerceToType(self, type, location):
        """
        Return an IDLValue equivalent to this one but of the given type.

        Raises WebIDLError if the value cannot be represented in that type
        (out-of-range integer, precision-losing int->float conversion,
        invalid enum string, restricted-float violation, or a type pair
        with no defined coercion).
        """
        if type == self.type:
            return self # Nothing to do
        # We first check for unions to ensure that even if the union is nullable
        # we end up with the right flat member type, not the union's type.
        if type.isUnion():
            # We use the flat member types here, because if we have a nullable
            # member type, or a nested union, we want the type the value
            # actually coerces to, not the nullable or nested union type.
            for subtype in type.unroll().flatMemberTypes:
                try:
                    coercedValue = self.coerceToType(subtype, location)
                    # Create a new IDLValue to make sure that we have the
                    # correct float/double type.  This is necessary because we
                    # use the value's type when it is a default value of a
                    # union, and the union cares about the exact float type.
                    return IDLValue(self.location, subtype, coercedValue.value)
                except WebIDLError:
                    # This member type rejected the value; try the next one.
                    # Only coercion failures are swallowed here; a bare
                    # "except:" would also hide genuine bugs (and
                    # KeyboardInterrupt).
                    pass
        # If the type allows null, rerun this matching on the inner type, except
        # nullable enums.  We handle those specially, because we want our
        # default string values to stay strings even when assigned to a nullable
        # enum.
        elif type.nullable() and not type.isEnum():
            innerValue = self.coerceToType(type.inner, location)
            return IDLValue(self.location, type, innerValue.value)
        elif self.type.isInteger() and type.isInteger():
            # We're both integer types.  See if we fit.
            (min, max) = integerTypeSizes[type._typeTag]
            if self.value <= max and self.value >= min:
                # Promote
                return IDLValue(self.location, type, self.value)
            else:
                raise WebIDLError("Value %s is out of range for type %s." %
                                  (self.value, type), [location])
        elif self.type.isInteger() and type.isFloat():
            # Convert an integer literal into float, but only if it is
            # exactly representable (floats have a 24-bit mantissa minimum).
            if -2**24 <= self.value <= 2**24:
                return IDLValue(self.location, type, float(self.value))
            else:
                raise WebIDLError("Converting value %s to %s will lose precision." %
                                  (self.value, type), [location])
        elif self.type.isString() and type.isEnum():
            # Just keep our string, but make sure it's a valid value for this enum
            enum = type.unroll().inner
            if self.value not in enum.values():
                raise WebIDLError("'%s' is not a valid default value for enum %s"
                                  % (self.value, enum.identifier.name),
                                  [location, enum.location])
            return self
        elif self.type.isFloat() and type.isFloat():
            # Infinities and NaN are only allowed in unrestricted floats.
            if (not type.isUnrestricted() and
                (self.value == float("inf") or self.value == float("-inf") or
                 math.isnan(self.value))):
                raise WebIDLError("Trying to convert unrestricted value %s to non-unrestricted"
                                  % self.value, [location])
            return IDLValue(self.location, type, self.value)
        elif self.type.isString() and type.isUSVString():
            # Allow USVStrings to use default value just like
            # DOMString.  No coercion is required in this case as Codegen.py
            # treats USVString just like DOMString, but with an
            # extra normalization step.
            assert self.type.isDOMString()
            return self
        raise WebIDLError("Cannot coerce type %s to type %s." %
                          (self.type, type), [location])
    def _getDependentObjects(self):
        return set()
class IDLNullValue(IDLObject):
    """The literal 'null' default value."""
    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None
    def coerceToType(self, type, location):
        # null is acceptable for: nullable types, unions containing a
        # nullable or dictionary member, dictionaries, and 'any'.
        nullOk = (isinstance(type, IDLNullableType) or
                  (type.isUnion() and
                   (type.hasNullableType or type.hasDictionaryType())) or
                  type.isDictionary() or
                  type.isAny())
        if not nullOk:
            raise WebIDLError("Cannot coerce null value to type %s." % type,
                              [location])
        nullValue = IDLNullValue(self.location)
        if type.isUnion() and not type.nullable() and type.hasDictionaryType():
            # We're actually a default value for the union's dictionary
            # member; tag the null with that member's type.
            for memberType in type.flatMemberTypes:
                if memberType.isDictionary():
                    nullValue.type = memberType
                    return nullValue
        nullValue.type = type
        return nullValue
    def _getDependentObjects(self):
        return set()
class IDLEmptySequenceValue(IDLObject):
    """The empty-sequence ([]) default value."""
    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None
    def coerceToType(self, type, location):
        """
        Coerce to a sequence type, or a union containing one; raises
        WebIDLError for anything else.
        """
        if type.isUnion():
            # We use the flat member types here, because if we have a nullable
            # member type, or a nested union, we want the type the value
            # actually coerces to, not the nullable or nested union type.
            for subtype in type.unroll().flatMemberTypes:
                try:
                    return self.coerceToType(subtype, location)
                except WebIDLError:
                    # This member type can't hold a sequence; try the next.
                    # Catching only WebIDLError (not a bare "except:") keeps
                    # real bugs and KeyboardInterrupt from being swallowed.
                    pass
        if not type.isSequence():
            raise WebIDLError("Cannot coerce empty sequence value to type %s." % type,
                              [location])
        emptySequenceValue = IDLEmptySequenceValue(self.location)
        emptySequenceValue.type = type
        return emptySequenceValue
    def _getDependentObjects(self):
        return set()
class IDLUndefinedValue(IDLObject):
    """The 'undefined' default value; only coercible to 'any'."""
    def __init__(self, location):
        IDLObject.__init__(self, location)
        self.type = None
        self.value = None
    def coerceToType(self, type, location):
        # undefined has no representation in any concrete IDL type.
        if not type.isAny():
            raise WebIDLError("Cannot coerce undefined value to type %s." % type,
                              [location])
        coerced = IDLUndefinedValue(self.location)
        coerced.type = type
        return coerced
    def _getDependentObjects(self):
        return set()
class IDLInterfaceMember(IDLObjectWithIdentifier, IDLExposureMixins):
Tags = enum(
'Const',
'Attr',
'Method',
'MaplikeOrSetlike',
'Iterable'
)
Special = enum(
'Static',
'Stringifier'
)
AffectsValues = ("Nothing", "Everything")
DependsOnValues = ("Nothing", "DOMState", "DeviceState", "Everything")
def __init__(self, location, identifier, tag):
IDLObjectWithIdentifier.__init__(self, location, None, identifier)
IDLExposureMixins.__init__(self, location)
self.tag = tag
self._extendedAttrDict = {}
def isMethod(self):
return self.tag == IDLInterfaceMember.Tags.Method
def isAttr(self):
return self.tag == IDLInterfaceMember.Tags.Attr
def isConst(self):
return self.tag == IDLInterfaceMember.Tags.Const
def isMaplikeOrSetlikeOrIterable(self):
return (self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike or
self.tag == IDLInterfaceMember.Tags.Iterable)
def isMaplikeOrSetlike(self):
return self.tag == IDLInterfaceMember.Tags.MaplikeOrSetlike
def addExtendedAttributes(self, attrs):
for attr in attrs:
self.handleExtendedAttribute(attr)
attrlist = attr.listValue()
self._extendedAttrDict[attr.identifier()] = attrlist if len(attrlist) else True
def handleExtendedAttribute(self, attr):
pass
def getExtendedAttribute(self, name):
return self._extendedAttrDict.get(name, None)
def finish(self, scope):
# We better be exposed _somewhere_.
if (len(self._exposureGlobalNames) == 0):
print self.identifier.name
assert len(self._exposureGlobalNames) != 0
IDLExposureMixins.finish(self, scope)
def validate(self):
if (self.getExtendedAttribute("Pref") and
self.exposureSet != set([self._globalScope.primaryGlobalName])):
raise WebIDLError("[Pref] used on an interface member that is not "
"%s-only" % self._globalScope.primaryGlobalName,
[self.location])
for attribute in ["CheckAnyPermissions", "CheckAllPermissions"]:
if (self.getExtendedAttribute(attribute) and
self.exposureSet != set([self._globalScope.primaryGlobalName])):
raise WebIDLError("[%s] used on an interface member that is "
"not %s-only" %
(attribute, self.parentScope.primaryGlobalName),
[self.location])
if self.isAttr() or self.isMethod():
if self.affects == "Everything" and self.dependsOn != "Everything":
raise WebIDLError("Interface member is flagged as affecting "
"everything but not depending on everything. "
"That seems rather unlikely.",
[self.location])
if self.getExtendedAttribute("NewObject"):
if self.dependsOn == "Nothing" or self.dependsOn == "DOMState":
raise WebIDLError("A [NewObject] method is not idempotent, "
"so it has to depend on something other than DOM state.",
[self.location])
def _setDependsOn(self, dependsOn):
if self.dependsOn != "Everything":
raise WebIDLError("Trying to specify multiple different DependsOn, "
"Pure, or Constant extended attributes for "
"attribute", [self.location])
if dependsOn not in IDLInterfaceMember.DependsOnValues:
raise WebIDLError("Invalid [DependsOn=%s] on attribute" % dependsOn,
[self.location])
self.dependsOn = dependsOn
def _setAffects(self, affects):
if self.affects != "Everything":
raise WebIDLError("Trying to specify multiple different Affects, "
"Pure, or Constant extended attributes for "
"attribute", [self.location])
if affects not in IDLInterfaceMember.AffectsValues:
raise WebIDLError("Invalid [Affects=%s] on attribute" % dependsOn,
[self.location])
self.affects = affects
def _addAlias(self, alias):
if alias in self.aliases:
raise WebIDLError("Duplicate [Alias=%s] on attribute" % alias,
[self.location])
self.aliases.append(alias)
class IDLMaplikeOrSetlikeOrIterableBase(IDLInterfaceMember):
def __init__(self, location, identifier, ifaceType, keyType, valueType, ifaceKind):
IDLInterfaceMember.__init__(self, location, identifier, ifaceKind)
if keyType is not None:
assert isinstance(keyType, IDLType)
else:
assert valueType is not None
assert ifaceType in ['maplike', 'setlike', 'iterable']
if valueType is not None:
assert isinstance(valueType, IDLType)
self.keyType = keyType
self.valueType = valueType
self.maplikeOrSetlikeOrIterableType = ifaceType
self.disallowedMemberNames = []
self.disallowedNonMethodNames = []
def isMaplike(self):
return self.maplikeOrSetlikeOrIterableType == "maplike"
def isSetlike(self):
return self.maplikeOrSetlikeOrIterableType == "setlike"
def isIterable(self):
return self.maplikeOrSetlikeOrIterableType == "iterable"
def hasKeyType(self):
return self.keyType is not None
def hasValueType(self):
return self.valueType is not None
def checkCollisions(self, members, isAncestor):
for member in members:
# Check that there are no disallowed members
if (member.identifier.name in self.disallowedMemberNames and
not ((member.isMethod() and member.isMaplikeOrSetlikeOrIterableMethod()) or
(member.isAttr() and member.isMaplikeOrSetlikeAttr()))):
raise WebIDLError("Member '%s' conflicts "
"with reserved %s name." %
(member.identifier.name,
self.maplikeOrSetlikeOrIterableType),
[self.location, member.location])
# Check that there are no disallowed non-method members
if (isAncestor or (member.isAttr() or member.isConst()) and
member.identifier.name in self.disallowedNonMethodNames):
raise WebIDLError("Member '%s' conflicts "
"with reserved %s method." %
(member.identifier.name,
self.maplikeOrSetlikeOrIterableType),
[self.location, member.location])
def addMethod(self, name, members, allowExistingOperations, returnType, args=[],
chromeOnly=False, isPure=False, affectsNothing=False, newObject=False,
isIteratorAlias=False):
"""
Create an IDLMethod based on the parameters passed in.
- members is the member list to add this function to, since this is
called during the member expansion portion of interface object
building.
- chromeOnly is only True for read-only js implemented classes, to
implement underscore prefixed convenience functions which would
otherwise not be available, unlike the case of C++ bindings.
- isPure is only True for idempotent functions, so it is not valid for
things like keys, values, etc. that return a new object every time.
- affectsNothing means that nothing changes due to this method, which
affects JIT optimization behavior
- newObject means the method creates and returns a new object.
"""
# Only add name to lists for collision checks if it's not chrome
# only.
if chromeOnly:
name = "__" + name
else:
if not allowExistingOperations:
self.disallowedMemberNames.append(name)
else:
self.disallowedNonMethodNames.append(name)
# If allowExistingOperations is True, and another operation exists
# with the same name as the one we're trying to add, don't add the
# maplike/setlike operation. However, if the operation is static,
# then fail by way of creating the function, which will cause a
# naming conflict, per the spec.
if allowExistingOperations:
for m in members:
if m.identifier.name == name and m.isMethod() and not m.isStatic():
return
method = IDLMethod(self.location,
IDLUnresolvedIdentifier(self.location, name, allowDoubleUnderscore=chromeOnly),
returnType, args, maplikeOrSetlikeOrIterable=self)
# We need to be able to throw from declaration methods
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Throws",))])
if chromeOnly:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("ChromeOnly",))])
if isPure:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Pure",))])
# Following attributes are used for keys/values/entries. Can't mark
# them pure, since they return a new object each time they are run.
if affectsNothing:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("DependsOn", "Everything")),
IDLExtendedAttribute(self.location, ("Affects", "Nothing"))])
if newObject:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("NewObject",))])
if isIteratorAlias:
method.addExtendedAttributes(
[IDLExtendedAttribute(self.location, ("Alias", "@@iterator"))])
members.append(method)
def resolve(self, parentScope):
if self.keyType:
self.keyType.resolveType(parentScope)
if self.valueType:
self.valueType.resolveType(parentScope)
def finish(self, scope):
IDLInterfaceMember.finish(self, scope)
if self.keyType and not self.keyType.isComplete():
t = self.keyType.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.keyType = t
if self.valueType and not self.valueType.isComplete():
t = self.valueType.complete(scope)
assert not isinstance(t, IDLUnresolvedType)
assert not isinstance(t, IDLTypedefType)
assert not isinstance(t.name, IDLUnresolvedIdentifier)
self.valueType = t
    def validate(self):
        # No checks beyond the generic interface-member validation.
        IDLInterfaceMember.validate(self)
    def handleExtendedAttribute(self, attr):
        # No attributes are specific to maplike/setlike/iterable declarations;
        # defer entirely to the base-class handling.
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
def _getDependentObjects(self):
deps = set()
if self.keyType:
deps.add(self.keyType)
if self.valueType:
deps.add(self.valueType)
return deps
def getForEachArguments(self):
return [IDLArgument(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"callback"),
BuiltinTypes[IDLBuiltinType.Types.object]),
IDLArgument(self.location,
IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
"thisArg"),
BuiltinTypes[IDLBuiltinType.Types.any],
optional=True)]
# Iterable adds ES6 iterator style functions and traits
# (keys/values/entries/@@iterator) to an interface.
class IDLIterable(IDLMaplikeOrSetlikeOrIterableBase):
    # Represents an "iterable<valueType>" or "iterable<keyType, valueType>"
    # declaration on an interface. Pair iterators (two type arguments) get
    # entries/keys/values/forEach expanded onto the interface; value
    # iterators copy those from %ArrayPrototype% instead.
    def __init__(self, location, identifier, keyType, valueType=None, scope=None):
        IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier,
                                                   "iterable", keyType, valueType,
                                                   IDLInterfaceMember.Tags.Iterable)
        # Filled in later with the iterator's return type; still None here.
        self.iteratorType = None
    def __str__(self):
        return "declared iterable with key '%s' and value '%s'" % (self.keyType, self.valueType)
    def expand(self, members, isJSImplemented):
        """
        In order to take advantage of all of the method machinery in Codegen,
        we generate our functions as if they were part of the interface
        specification during parsing.
        """
        # We only need to add entries/keys/values here if we're a pair iterator.
        # Value iterators just copy these from %ArrayPrototype% instead.
        if not self.isPairIterator():
            return
        # object entries()
        self.addMethod("entries", members, False, self.iteratorType,
                       affectsNothing=True, newObject=True,
                       isIteratorAlias=True)
        # object keys()
        self.addMethod("keys", members, False, self.iteratorType,
                       affectsNothing=True, newObject=True)
        # object values()
        self.addMethod("values", members, False, self.iteratorType,
                       affectsNothing=True, newObject=True)
        # void forEach(callback(valueType, keyType), optional any thisArg)
        self.addMethod("forEach", members, False,
                       BuiltinTypes[IDLBuiltinType.Types.void],
                       self.getForEachArguments())
    def isValueIterator(self):
        # A value iterator is any iterable that is not a pair iterator.
        return not self.isPairIterator()
    def isPairIterator(self):
        # Pair iterators are the two-type-argument form (they have a key type).
        return self.hasKeyType()
# MaplikeOrSetlike adds ES6 map-or-set-like traits to an interface.
class IDLMaplikeOrSetlike(IDLMaplikeOrSetlikeOrIterableBase):
    # Represents a "maplike<K, V>" or "setlike<K>" declaration on an
    # interface; expand() synthesizes the ES6 Map/Set-style members.
    def __init__(self, location, identifier, maplikeOrSetlikeType,
                 readonly, keyType, valueType):
        IDLMaplikeOrSetlikeOrIterableBase.__init__(self, location, identifier, maplikeOrSetlikeType,
                                                   keyType, valueType, IDLInterfaceMember.Tags.MaplikeOrSetlike)
        self.readonly = readonly
        # Slot index for the backing object; assigned later (not here).
        self.slotIndices = None
        # When generating JSAPI access code, we need to know the backing object
        # type prefix to create the correct function. Generate here for reuse.
        if self.isMaplike():
            self.prefix = 'Map'
        elif self.isSetlike():
            self.prefix = 'Set'
    def __str__(self):
        return "declared '%s' with key '%s'" % (self.maplikeOrSetlikeOrIterableType, self.keyType)
    def expand(self, members, isJSImplemented):
        """
        In order to take advantage of all of the method machinery in Codegen,
        we generate our functions as if they were part of the interface
        specification during parsing.
        """
        # Both maplike and setlike have a size attribute
        members.append(IDLAttribute(self.location,
                                    IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"), "size"),
                                    BuiltinTypes[IDLBuiltinType.Types.unsigned_long],
                                    True,
                                    maplikeOrSetlike=self))
        self.reserved_ro_names = ["size"]
        # object entries()
        self.addMethod("entries", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
                       affectsNothing=True, isIteratorAlias=self.isMaplike())
        # object keys()
        self.addMethod("keys", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
                       affectsNothing=True)
        # object values()
        self.addMethod("values", members, False, BuiltinTypes[IDLBuiltinType.Types.object],
                       affectsNothing=True, isIteratorAlias=self.isSetlike())
        # void forEach(callback(valueType, keyType), thisVal)
        self.addMethod("forEach", members, False, BuiltinTypes[IDLBuiltinType.Types.void],
                       self.getForEachArguments())
        def getKeyArg():
            # A fresh IDLArgument is built per call site — presumably because
            # each generated method resolves its arguments into its own
            # scope, so they can't be shared; confirm before refactoring.
            return IDLArgument(self.location,
                               IDLUnresolvedIdentifier(self.location, "key"),
                               self.keyType)
        # boolean has(keyType key)
        self.addMethod("has", members, False, BuiltinTypes[IDLBuiltinType.Types.boolean],
                       [getKeyArg()], isPure=True)
        if not self.readonly:
            # void clear()
            self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
                           [])
            # boolean delete(keyType key)
            self.addMethod("delete", members, True,
                           BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()])
        # Always generate underscored functions (e.g. __add, __clear) for js
        # implemented interfaces as convenience functions.
        if isJSImplemented:
            # void clear()
            self.addMethod("clear", members, True, BuiltinTypes[IDLBuiltinType.Types.void],
                           [], chromeOnly=True)
            # boolean delete(keyType key)
            self.addMethod("delete", members, True,
                           BuiltinTypes[IDLBuiltinType.Types.boolean], [getKeyArg()],
                           chromeOnly=True)
        if self.isSetlike():
            if not self.readonly:
                # Add returns the set object it just added to.
                # object add(keyType key)
                self.addMethod("add", members, True,
                               BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()])
            if isJSImplemented:
                self.addMethod("add", members, True,
                               BuiltinTypes[IDLBuiltinType.Types.object], [getKeyArg()],
                               chromeOnly=True)
            return
        # If we get this far, we're a maplike declaration.
        # valueType get(keyType key)
        #
        # Note that instead of the value type, we're using any here. The
        # validity checks should happen as things are inserted into the map,
        # and using any as the return type makes code generation much simpler.
        #
        # TODO: Bug 1155340 may change this to use specific type to provide
        # more info to JIT.
        self.addMethod("get", members, False, BuiltinTypes[IDLBuiltinType.Types.any],
                       [getKeyArg()], isPure=True)
        def getValueArg():
            # Same fresh-per-call-site pattern as getKeyArg above.
            return IDLArgument(self.location,
                               IDLUnresolvedIdentifier(self.location, "value"),
                               self.valueType)
        if not self.readonly:
            self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
                           [getKeyArg(), getValueArg()])
        if isJSImplemented:
            self.addMethod("set", members, True, BuiltinTypes[IDLBuiltinType.Types.object],
                           [getKeyArg(), getValueArg()], chromeOnly=True)
class IDLConst(IDLInterfaceMember):
    # Represents a WebIDL "const" member of an interface.
    def __init__(self, location, identifier, type, value):
        """
        type must be an IDLType; dictionary-typed constants and the reserved
        identifier 'prototype' are rejected here. value is coerced to the
        (completed) type later, in finish().
        """
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Const)
        assert isinstance(type, IDLType)
        if type.isDictionary():
            raise WebIDLError("A constant cannot be of a dictionary type",
                              [self.location])
        self.type = type
        self.value = value
        if identifier.name == "prototype":
            raise WebIDLError("The identifier of a constant must not be 'prototype'",
                              [location])
    def __str__(self):
        return "'%s' const '%s'" % (self.type, self.identifier)
    def finish(self, scope):
        # Complete the type if needed, enforce primitive/string-ness, then
        # coerce the parsed value to the final type.
        IDLInterfaceMember.finish(self, scope)
        if not self.type.isComplete():
            # Renamed from "type" to avoid shadowing the builtin.
            completedType = self.type.complete(scope)
            if not completedType.isPrimitive() and not completedType.isString():
                locations = [self.type.location, completedType.location]
                # Some completed types carry a more precise inner type
                # location; others have no .inner attribute at all, so only
                # AttributeError is expected here (a bare except would also
                # swallow KeyboardInterrupt and real bugs).
                try:
                    locations.append(completedType.inner.location)
                except AttributeError:
                    pass
                raise WebIDLError("Incorrect type for constant", locations)
            self.type = completedType
        # The value might not match the type
        coercedValue = self.value.coerceToType(self.type, self.location)
        assert coercedValue
        self.value = coercedValue
    def validate(self):
        # Nothing beyond the generic interface-member validation.
        IDLInterfaceMember.validate(self)
    def handleExtendedAttribute(self, attr):
        identifier = attr.identifier()
        if identifier == "Exposed":
            convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
        elif (identifier == "Pref" or
              identifier == "ChromeOnly" or
              identifier == "Func" or
              identifier == "AvailableIn" or
              identifier == "CheckAnyPermissions" or
              identifier == "CheckAllPermissions"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on constant" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def _getDependentObjects(self):
        return set([self.type, self.value])
class IDLAttribute(IDLInterfaceMember):
    # Represents a WebIDL attribute on an interface, including the
    # auto-generated ones (the maplike/setlike "size" attribute passes
    # maplikeOrSetlike=self).
    def __init__(self, location, identifier, type, readonly, inherit=False,
                 static=False, stringifier=False, maplikeOrSetlike=None):
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Attr)
        assert isinstance(type, IDLType)
        self.type = type
        self.readonly = readonly
        self.inherit = inherit
        self.static = static
        # The following flags are set by handleExtendedAttribute below.
        self.lenientThis = False
        self._unforgeable = False
        self.stringifier = stringifier
        self.enforceRange = False
        self.clamp = False
        # Slot index bookkeeping for [Cached]/[StoreInSlot]; assigned later.
        self.slotIndices = None
        assert maplikeOrSetlike is None or isinstance(maplikeOrSetlike, IDLMaplikeOrSetlike)
        self.maplikeOrSetlike = maplikeOrSetlike
        # Conservative JIT annotations; narrowed by [Pure]/[Constant]/
        # [Affects]/[DependsOn] in handleExtendedAttribute.
        self.dependsOn = "Everything"
        self.affects = "Everything"
        if static and identifier.name == "prototype":
            raise WebIDLError("The identifier of a static attribute must not be 'prototype'",
                              [location])
        if readonly and inherit:
            raise WebIDLError("An attribute cannot be both 'readonly' and 'inherit'",
                              [self.location])
    def isStatic(self):
        return self.static
    def __str__(self):
        return "'%s' attribute '%s'" % (self.type, self.identifier)
    def finish(self, scope):
        # Complete the attribute's type, then enforce the type restrictions
        # that can only be checked once the type is known.
        IDLInterfaceMember.finish(self, scope)
        if not self.type.isComplete():
            t = self.type.complete(scope)
            assert not isinstance(t, IDLUnresolvedType)
            assert not isinstance(t, IDLTypedefType)
            assert not isinstance(t.name, IDLUnresolvedIdentifier)
            self.type = t
        # Dictionary/sequence/MozMap-typed attributes are only allowed when
        # [Cached] (the cached JS value is what script actually sees).
        if self.type.isDictionary() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("An attribute cannot be of a dictionary type",
                              [self.location])
        if self.type.isSequence() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("A non-cached attribute cannot be of a sequence "
                              "type", [self.location])
        if self.type.isMozMap() and not self.getExtendedAttribute("Cached"):
            raise WebIDLError("A non-cached attribute cannot be of a MozMap "
                              "type", [self.location])
        # The same restrictions apply through union member types, recursively.
        if self.type.isUnion():
            for f in self.type.unroll().flatMemberTypes:
                if f.isDictionary():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a dictionary "
                                      "type", [self.location, f.location])
                if f.isSequence():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a sequence "
                                      "type", [self.location, f.location])
                if f.isMozMap():
                    raise WebIDLError("An attribute cannot be of a union "
                                      "type if one of its member types (or "
                                      "one of its member types's member "
                                      "types, and so on) is a MozMap "
                                      "type", [self.location, f.location])
        if not self.type.isInterface() and self.getExtendedAttribute("PutForwards"):
            raise WebIDLError("An attribute with [PutForwards] must have an "
                              "interface type as its type", [self.location])
        if not self.type.isInterface() and self.getExtendedAttribute("SameObject"):
            raise WebIDLError("An attribute with [SameObject] must have an "
                              "interface type as its type", [self.location])
    def validate(self):
        IDLInterfaceMember.validate(self)
        # [Cached]/[StoreInSlot] getters may be skipped in favor of the
        # stored value, so they must be side-effect free.
        if ((self.getExtendedAttribute("Cached") or
             self.getExtendedAttribute("StoreInSlot")) and
            not self.affects == "Nothing"):
            raise WebIDLError("Cached attributes and attributes stored in "
                              "slots must be Constant or Pure or "
                              "Affects=Nothing, since the getter won't always "
                              "be called.",
                              [self.location])
        if self.getExtendedAttribute("Frozen"):
            if (not self.type.isSequence() and not self.type.isDictionary() and
                not self.type.isMozMap()):
                raise WebIDLError("[Frozen] is only allowed on "
                                  "sequence-valued, dictionary-valued, and "
                                  "MozMap-valued attributes",
                                  [self.location])
        if not self.type.unroll().isExposedInAllOf(self.exposureSet):
            raise WebIDLError("Attribute returns a type that is not exposed "
                              "everywhere where the attribute is exposed",
                              [self.location])
    def handleExtendedAttribute(self, attr):
        # NOTE: branch order matters in this chain — e.g. "Constant" and
        # "SameObject" are first screened against non-readonly attributes,
        # and only later (further down) have their JIT annotations applied.
        identifier = attr.identifier()
        if identifier == "SetterThrows" and self.readonly:
            raise WebIDLError("Readonly attributes must not be flagged as "
                              "[SetterThrows]",
                              [self.location])
        elif (((identifier == "Throws" or identifier == "GetterThrows") and
               self.getExtendedAttribute("StoreInSlot")) or
              (identifier == "StoreInSlot" and
               (self.getExtendedAttribute("Throws") or
                self.getExtendedAttribute("GetterThrows")))):
            raise WebIDLError("Throwing things can't be [StoreInSlot]",
                              [attr.location])
        elif identifier == "LenientThis":
            if not attr.noArguments():
                raise WebIDLError("[LenientThis] must take no arguments",
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[LenientThis] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("CrossOriginReadable"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [CrossOriginReadable]",
                                  [attr.location, self.location])
            if self.getExtendedAttribute("CrossOriginWritable"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [CrossOriginWritable]",
                                  [attr.location, self.location])
            self.lenientThis = True
        elif identifier == "Unforgeable":
            if self.isStatic():
                raise WebIDLError("[Unforgeable] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            self._unforgeable = True
        elif identifier == "SameObject" and not self.readonly:
            raise WebIDLError("[SameObject] only allowed on readonly attributes",
                              [attr.location, self.location])
        elif identifier == "Constant" and not self.readonly:
            raise WebIDLError("[Constant] only allowed on readonly attributes",
                              [attr.location, self.location])
        elif identifier == "PutForwards":
            if not self.readonly:
                raise WebIDLError("[PutForwards] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[PutForwards] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("Replaceable") is not None:
                raise WebIDLError("[PutForwards] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
            if not attr.hasValue():
                raise WebIDLError("[PutForwards] takes an identifier",
                                  [attr.location, self.location])
        elif identifier == "Replaceable":
            if not attr.noArguments():
                raise WebIDLError("[Replaceable] must take no arguments",
                                  [attr.location])
            if not self.readonly:
                raise WebIDLError("[Replaceable] is only allowed on readonly "
                                  "attributes", [attr.location, self.location])
            if self.isStatic():
                raise WebIDLError("[Replaceable] is only allowed on non-static "
                                  "attributes", [attr.location, self.location])
            if self.getExtendedAttribute("PutForwards") is not None:
                raise WebIDLError("[PutForwards] and [Replaceable] can't both "
                                  "appear on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "LenientFloat":
            if self.readonly:
                raise WebIDLError("[LenientFloat] used on a readonly attribute",
                                  [attr.location, self.location])
            if not self.type.includesRestrictedFloat():
                raise WebIDLError("[LenientFloat] used on an attribute with a "
                                  "non-restricted-float type",
                                  [attr.location, self.location])
        elif identifier == "EnforceRange":
            if self.readonly:
                raise WebIDLError("[EnforceRange] used on a readonly attribute",
                                  [attr.location, self.location])
            self.enforceRange = True
        elif identifier == "Clamp":
            if self.readonly:
                raise WebIDLError("[Clamp] used on a readonly attribute",
                                  [attr.location, self.location])
            self.clamp = True
        elif identifier == "StoreInSlot":
            if self.getExtendedAttribute("Cached"):
                raise WebIDLError("[StoreInSlot] and [Cached] must not be "
                                  "specified on the same attribute",
                                  [attr.location, self.location])
        elif identifier == "Cached":
            if self.getExtendedAttribute("StoreInSlot"):
                raise WebIDLError("[Cached] and [StoreInSlot] must not be "
                                  "specified on the same attribute",
                                  [attr.location, self.location])
        elif (identifier == "CrossOriginReadable" or
              identifier == "CrossOriginWritable"):
            # NOTE(review): the no-arguments check only fires for
            # CrossOriginReadable, not CrossOriginWritable — looks
            # deliberate but asymmetric; confirm before "fixing".
            if not attr.noArguments() and identifier == "CrossOriginReadable":
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
            if self.isStatic():
                raise WebIDLError("[%s] is only allowed on non-static "
                                  "attributes" % identifier,
                                  [attr.location, self.location])
            if self.getExtendedAttribute("LenientThis"):
                raise WebIDLError("[LenientThis] is not allowed in combination "
                                  "with [%s]" % identifier,
                                  [attr.location, self.location])
        elif identifier == "Exposed":
            convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
        elif identifier == "Pure":
            if not attr.noArguments():
                raise WebIDLError("[Pure] must take no arguments",
                                  [attr.location])
            self._setDependsOn("DOMState")
            self._setAffects("Nothing")
        elif identifier == "Constant" or identifier == "SameObject":
            if not attr.noArguments():
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
            self._setDependsOn("Nothing")
            self._setAffects("Nothing")
        elif identifier == "Affects":
            if not attr.hasValue():
                raise WebIDLError("[Affects] takes an identifier",
                                  [attr.location])
            self._setAffects(attr.value())
        elif identifier == "DependsOn":
            if not attr.hasValue():
                raise WebIDLError("[DependsOn] takes an identifier",
                                  [attr.location])
            if (attr.value() != "Everything" and attr.value() != "DOMState" and
                not self.readonly):
                raise WebIDLError("[DependsOn=%s] only allowed on "
                                  "readonly attributes" % attr.value(),
                                  [attr.location, self.location])
            self._setDependsOn(attr.value())
        elif identifier == "UseCounter":
            if self.stringifier:
                raise WebIDLError("[UseCounter] must not be used on a "
                                  "stringifier attribute",
                                  [attr.location, self.location])
        elif (identifier == "Pref" or
              identifier == "Deprecated" or
              identifier == "SetterThrows" or
              identifier == "Throws" or
              identifier == "GetterThrows" or
              identifier == "ChromeOnly" or
              identifier == "Func" or
              identifier == "Frozen" or
              identifier == "AvailableIn" or
              identifier == "NewObject" or
              identifier == "UnsafeInPrerendering" or
              identifier == "CheckAnyPermissions" or
              identifier == "CheckAllPermissions" or
              identifier == "BinaryName"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on attribute" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def resolve(self, parentScope):
        assert isinstance(parentScope, IDLScope)
        self.type.resolveType(parentScope)
        IDLObjectWithIdentifier.resolve(self, parentScope)
    def addExtendedAttributes(self, attrs):
        # Filter out the string-handling attributes first, then let the base
        # class dispatch the rest through handleExtendedAttribute.
        attrs = self.checkForStringHandlingExtendedAttributes(attrs)
        IDLInterfaceMember.addExtendedAttributes(self, attrs)
    def hasLenientThis(self):
        return self.lenientThis
    def isMaplikeOrSetlikeAttr(self):
        """
        True if this attribute was generated from an interface with
        maplike/setlike (e.g. this is the size attribute for
        maplike/setlike)
        """
        return self.maplikeOrSetlike is not None
    def isUnforgeable(self):
        return self._unforgeable
    def _getDependentObjects(self):
        return set([self.type])
class IDLArgument(IDLObjectWithIdentifier):
    # Represents an operation argument; also reused for dictionary members
    # (with dictionaryMember=True).
    def __init__(self, location, identifier, type, optional=False, defaultValue=None, variadic=False, dictionaryMember=False):
        IDLObjectWithIdentifier.__init__(self, location, None, identifier)
        assert isinstance(type, IDLType)
        self.type = type
        self.optional = optional
        self.defaultValue = defaultValue
        self.variadic = variadic
        self.dictionaryMember = dictionaryMember
        self._isComplete = False
        # [EnforceRange] / [Clamp] flags; mutually exclusive (checked below).
        self.enforceRange = False
        self.clamp = False
        self._allowTreatNonCallableAsNull = False
        # Variadic arguments are necessarily optional and can't have defaults.
        assert not variadic or optional
        assert not variadic or not defaultValue
    def addExtendedAttributes(self, attrs):
        # String-handling attributes are consumed by the shared helper; every
        # attribute left over must be handled (or rejected) right here.
        attrs = self.checkForStringHandlingExtendedAttributes(
            attrs,
            isDictionaryMember=self.dictionaryMember,
            isOptional=self.optional)
        for attribute in attrs:
            identifier = attribute.identifier()
            if identifier == "Clamp":
                if not attribute.noArguments():
                    raise WebIDLError("[Clamp] must take no arguments",
                                      [attribute.location])
                if self.enforceRange:
                    raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
                                      [self.location])
                self.clamp = True
            elif identifier == "EnforceRange":
                if not attribute.noArguments():
                    raise WebIDLError("[EnforceRange] must take no arguments",
                                      [attribute.location])
                if self.clamp:
                    raise WebIDLError("[EnforceRange] and [Clamp] are mutually exclusive",
                                      [self.location])
                self.enforceRange = True
            elif identifier == "TreatNonCallableAsNull":
                self._allowTreatNonCallableAsNull = True
            else:
                raise WebIDLError("Unhandled extended attribute on %s" %
                                  ("a dictionary member" if self.dictionaryMember else
                                   "an argument"),
                                  [attribute.location])
    def isComplete(self):
        return self._isComplete
    def complete(self, scope):
        # Complete our type and synthesize implicit default values; idempotent.
        if self._isComplete:
            return
        self._isComplete = True
        if not self.type.isComplete():
            type = self.type.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            self.type = type
        if ((self.type.isDictionary() or
             self.type.isUnion() and self.type.unroll().hasDictionaryType()) and
            self.optional and not self.defaultValue and not self.variadic):
            # Default optional non-variadic dictionaries to null,
            # for simplicity, so the codegen doesn't have to special-case this.
            self.defaultValue = IDLNullValue(self.location)
        elif self.type.isAny():
            assert (self.defaultValue is None or
                    isinstance(self.defaultValue, IDLNullValue))
            # optional 'any' values always have a default value
            if self.optional and not self.defaultValue and not self.variadic:
                # Set the default value to undefined, for simplicity, so the
                # codegen doesn't have to special-case this.
                self.defaultValue = IDLUndefinedValue(self.location)
        # Now do the coercing thing; this needs to happen after the
        # above creation of a default value.
        if self.defaultValue:
            self.defaultValue = self.defaultValue.coerceToType(self.type,
                                                               self.location)
            assert self.defaultValue
    def allowTreatNonCallableAsNull(self):
        return self._allowTreatNonCallableAsNull
    def _getDependentObjects(self):
        deps = set([self.type])
        if self.defaultValue:
            deps.add(self.defaultValue)
        return deps
    def canHaveMissingValue(self):
        # True when the argument can be genuinely absent at the call site
        # (optional with no default — not even a synthesized one).
        return self.optional and not self.defaultValue
class IDLCallback(IDLObjectWithScope):
    # Represents a "callback Name = retType (args)" declaration.
    def __init__(self, location, parentScope, identifier, returnType, arguments):
        assert isinstance(returnType, IDLType)
        self._returnType = returnType
        # Clone the list
        self._arguments = list(arguments)
        IDLObjectWithScope.__init__(self, location, parentScope, identifier)
        for (returnType, arguments) in self.signatures():
            for argument in arguments:
                argument.resolve(self)
        # Set by addExtendedAttributes; the two are mutually exclusive.
        self._treatNonCallableAsNull = False
        self._treatNonObjectAsNull = False
    def isCallback(self):
        return True
    def signatures(self):
        # Callbacks have exactly one signature (no overloading).
        return [(self._returnType, self._arguments)]
    def finish(self, scope):
        # Complete the return type and each argument's type.
        if not self._returnType.isComplete():
            type = self._returnType.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            self._returnType = type
        for argument in self._arguments:
            if argument.type.isComplete():
                continue
            type = argument.type.complete(scope)
            assert not isinstance(type, IDLUnresolvedType)
            assert not isinstance(type, IDLTypedefType)
            assert not isinstance(type.name, IDLUnresolvedIdentifier)
            argument.type = type
    def validate(self):
        pass
    def addExtendedAttributes(self, attrs):
        unhandledAttrs = []
        for attr in attrs:
            if attr.identifier() == "TreatNonCallableAsNull":
                self._treatNonCallableAsNull = True
            elif attr.identifier() == "TreatNonObjectAsNull":
                self._treatNonObjectAsNull = True
            else:
                unhandledAttrs.append(attr)
        if self._treatNonCallableAsNull and self._treatNonObjectAsNull:
            raise WebIDLError("Cannot specify both [TreatNonCallableAsNull] "
                              "and [TreatNonObjectAsNull]", [self.location])
        if len(unhandledAttrs) != 0:
            # NOTE(review): this delegates to IDLType.addExtendedAttributes
            # even though IDLCallback is not an IDLType — presumably to reuse
            # the shared rejection of leftover attributes; confirm before
            # changing.
            IDLType.addExtendedAttributes(self, unhandledAttrs)
    def _getDependentObjects(self):
        return set([self._returnType] + self._arguments)
class IDLCallbackType(IDLType):
    # The IDLType wrapper around an IDLCallback declaration, used wherever a
    # callback name appears in type position.
    def __init__(self, location, callback):
        IDLType.__init__(self, location, callback.identifier.name)
        self.callback = callback
    def isCallback(self):
        return True
    def tag(self):
        return IDLType.Tags.callback
    def isDistinguishableFrom(self, other):
        # Per overload resolution: never distinguishable from a promise.
        if other.isPromise():
            return False
        if other.isUnion():
            # Just forward to the union; it'll deal
            return other.isDistinguishableFrom(self)
        # Distinguishable from anything that can't be a callable object.
        return any(check() for check in (other.isPrimitive,
                                         other.isString,
                                         other.isEnum,
                                         other.isNonCallbackInterface,
                                         other.isDate,
                                         other.isSequence))
    def _getDependentObjects(self):
        return self.callback._getDependentObjects()
class IDLMethodOverload:
    """
    A single overload of a WebIDL method: one return type plus one argument
    list, tagged with its source location.

    This is deliberately not an element of the spec's "effective overload
    set": optional arguments do not split an overload here. Each same-named
    method contributes exactly one IDLMethodOverload, and they all hang off
    a single IDLMethod representing the full set.
    """
    def __init__(self, returnType, arguments, location):
        self.returnType = returnType
        # Defensive copy: later mutation of the caller's list must not leak in.
        self.arguments = list(arguments)
        self.location = location
    def _getDependentObjects(self):
        # The overload depends on every argument plus its return type.
        return set(self.arguments) | {self.returnType}
class IDLMethod(IDLInterfaceMember, IDLScope):
    # Kinds of special operations, extending the base interface-member
    # specials with the WebIDL operation specials.
    Special = enum(
        'Getter',
        'Setter',
        'Creator',
        'Deleter',
        'LegacyCaller',
        base=IDLInterfaceMember.Special
    )
    # Type suffix modifiers seen while parsing signatures.
    TypeSuffixModifier = enum(
        'None',
        'QMark',
        'Brackets'
    )
    # Whether a special operation is named, indexed, or neither.
    NamedOrIndexed = enum(
        'Neither',
        'Named',
        'Indexed'
    )
    def __init__(self, location, identifier, returnType, arguments,
                 static=False, getter=False, setter=False, creator=False,
                 deleter=False, specialType=NamedOrIndexed.Neither,
                 legacycaller=False, stringifier=False, jsonifier=False,
                 maplikeOrSetlikeOrIterable=None):
        # REVIEW: specialType is NamedOrIndexed -- wow, this is messed up.
        IDLInterfaceMember.__init__(self, location, identifier,
                                    IDLInterfaceMember.Tags.Method)
        self._hasOverloads = False
        assert isinstance(returnType, IDLType)
        # self._overloads is a list of IDLMethodOverloads
        self._overloads = [IDLMethodOverload(returnType, arguments, location)]
        assert isinstance(static, bool)
        self._static = static
        assert isinstance(getter, bool)
        self._getter = getter
        assert isinstance(setter, bool)
        self._setter = setter
        assert isinstance(creator, bool)
        self._creator = creator
        assert isinstance(deleter, bool)
        self._deleter = deleter
        assert isinstance(legacycaller, bool)
        self._legacycaller = legacycaller
        assert isinstance(stringifier, bool)
        self._stringifier = stringifier
        assert isinstance(jsonifier, bool)
        self._jsonifier = jsonifier
        assert maplikeOrSetlikeOrIterable is None or isinstance(maplikeOrSetlikeOrIterable, IDLMaplikeOrSetlikeOrIterableBase)
        # Non-None when this method was auto-generated from a
        # maplike/setlike/iterable declaration.
        self.maplikeOrSetlikeOrIterable = maplikeOrSetlikeOrIterable
        self._specialType = specialType
        # Set by the [Unforgeable] extended attribute.
        self._unforgeable = False
        # Conservative JIT annotations, narrowed by extended attributes.
        self.dependsOn = "Everything"
        self.affects = "Everything"
        # [Alias=...] names collected for this method.
        self.aliases = []
        if static and identifier.name == "prototype":
            raise WebIDLError("The identifier of a static operation must not be 'prototype'",
                              [location])
        self.assertSignatureConstraints()
def __str__(self):
return "Method '%s'" % self.identifier
    def assertSignatureConstraints(self):
        """
        Sanity-check the structural invariants the parser must already have
        enforced for special operations: getters/deleters take exactly one
        string-or-unsigned-long argument, setters/creators take two, and
        stringifiers/jsonifiers take none and return domstring/object.
        """
        if self._getter or self._deleter:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            arguments = overload.arguments
            assert len(arguments) == 1
            # Named specials index by domstring, indexed ones by unsigned long.
            assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
                    arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
            assert not arguments[0].optional and not arguments[0].variadic
            assert not self._getter or not overload.returnType.isVoid()
        if self._setter or self._creator:
            assert len(self._overloads) == 1
            arguments = self._overloads[0].arguments
            assert len(arguments) == 2
            assert (arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.domstring] or
                    arguments[0].type == BuiltinTypes[IDLBuiltinType.Types.unsigned_long])
            assert not arguments[0].optional and not arguments[0].variadic
            assert not arguments[1].optional and not arguments[1].variadic
        if self._stringifier:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            assert len(overload.arguments) == 0
            assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.domstring]
        if self._jsonifier:
            assert len(self._overloads) == 1
            overload = self._overloads[0]
            assert len(overload.arguments) == 0
            assert overload.returnType == BuiltinTypes[IDLBuiltinType.Types.object]
    def isStatic(self):
        # True for static operations.
        return self._static
    def isGetter(self):
        # True for special "getter" operations.
        return self._getter
    def isSetter(self):
        # True for special "setter" operations.
        return self._setter
    def isCreator(self):
        # True for special "creator" operations.
        return self._creator
    def isDeleter(self):
        # True for special "deleter" operations.
        return self._deleter
def isNamed(self):
assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
self._specialType == IDLMethod.NamedOrIndexed.Indexed)
return self._specialType == IDLMethod.NamedOrIndexed.Named
def isIndexed(self):
assert (self._specialType == IDLMethod.NamedOrIndexed.Named or
self._specialType == IDLMethod.NamedOrIndexed.Indexed)
return self._specialType == IDLMethod.NamedOrIndexed.Indexed
    def isLegacycaller(self):
        # True for legacycaller operations.
        return self._legacycaller
    def isStringifier(self):
        # True for stringifier operations.
        return self._stringifier
    def isJsonifier(self):
        # True for jsonifier operations.
        return self._jsonifier
    def isMaplikeOrSetlikeOrIterableMethod(self):
        """
        True if this method was generated as part of a
        maplike/setlike/etc interface (e.g. has/get methods)
        """
        return self.maplikeOrSetlikeOrIterable is not None
def isSpecial(self):
return (self.isGetter() or
self.isSetter() or
self.isCreator() or
self.isDeleter() or
self.isLegacycaller() or
self.isStringifier() or
self.isJsonifier())
    def hasOverloads(self):
        # True once addOverload has merged in at least one sibling overload.
        return self._hasOverloads
def isIdentifierLess(self):
"""
True if the method name started with __, and if the method is not a
maplike/setlike method. Interfaces with maplike/setlike will generate
methods starting with __ for chrome only backing object access in JS
implemented interfaces, so while these functions use what is considered
an non-identifier name, they actually DO have an identifier.
"""
return (self.identifier.name[:2] == "__" and
not self.isMaplikeOrSetlikeOrIterableMethod())
def resolve(self, parentScope):
assert isinstance(parentScope, IDLScope)
IDLObjectWithIdentifier.resolve(self, parentScope)
IDLScope.__init__(self, self.location, parentScope, self.identifier)
for (returnType, arguments) in self.signatures():
for argument in arguments:
argument.resolve(self)
def addOverload(self, method):
assert len(method._overloads) == 1
if self._extendedAttrDict != method ._extendedAttrDict:
raise WebIDLError("Extended attributes differ on different "
"overloads of %s" % method.identifier,
[self.location, method.location])
self._overloads.extend(method._overloads)
self._hasOverloads = True
if self.isStatic() != method.isStatic():
raise WebIDLError("Overloaded identifier %s appears with different values of the 'static' attribute" % method.identifier,
[method.location])
if self.isLegacycaller() != method.isLegacycaller():
raise WebIDLError("Overloaded identifier %s appears with different values of the 'legacycaller' attribute" % method.identifier,
[method.location])
# Can't overload special things!
assert not self.isGetter()
assert not method.isGetter()
assert not self.isSetter()
assert not method.isSetter()
assert not self.isCreator()
assert not method.isCreator()
assert not self.isDeleter()
assert not method.isDeleter()
assert not self.isStringifier()
assert not method.isStringifier()
assert not self.isJsonifier()
assert not method.isJsonifier()
return self
def signatures(self):
return [(overload.returnType, overload.arguments) for overload in
self._overloads]
    def finish(self, scope):
        # Complete (resolve) return and argument types for every overload,
        # then precompute the argument-count tables that overload resolution
        # in validate() relies on.
        IDLInterfaceMember.finish(self, scope)
        for overload in self._overloads:
            returnType = overload.returnType
            if not returnType.isComplete():
                returnType = returnType.complete(scope)
            assert not isinstance(returnType, IDLUnresolvedType)
            assert not isinstance(returnType, IDLTypedefType)
            assert not isinstance(returnType.name, IDLUnresolvedIdentifier)
            overload.returnType = returnType
            for argument in overload.arguments:
                if not argument.isComplete():
                    argument.complete(scope)
                assert argument.type.isComplete()
        # Now compute various information that will be used by the
        # WebIDL overload resolution algorithm.
        self.maxArgCount = max(len(s[1]) for s in self.signatures())
        self.allowedArgCounts = [i for i in range(self.maxArgCount+1)
                                 if len(self.signaturesForArgCount(i)) != 0]
def validate(self):
IDLInterfaceMember.validate(self)
# Make sure our overloads are properly distinguishable and don't have
# different argument types before the distinguishing args.
for argCount in self.allowedArgCounts:
possibleOverloads = self.overloadsForArgCount(argCount)
if len(possibleOverloads) == 1:
continue
distinguishingIndex = self.distinguishingIndexForArgCount(argCount)
for idx in range(distinguishingIndex):
firstSigType = possibleOverloads[0].arguments[idx].type
for overload in possibleOverloads[1:]:
if overload.arguments[idx].type != firstSigType:
raise WebIDLError(
"Signatures for method '%s' with %d arguments have "
"different types of arguments at index %d, which "
"is before distinguishing index %d" %
(self.identifier.name, argCount, idx,
distinguishingIndex),
[self.location, overload.location])
overloadWithPromiseReturnType = None
overloadWithoutPromiseReturnType = None
for overload in self._overloads:
returnType = overload.returnType
if not returnType.unroll().isExposedInAllOf(self.exposureSet):
raise WebIDLError("Overload returns a type that is not exposed "
"everywhere where the method is exposed",
[overload.location])
variadicArgument = None
arguments = overload.arguments
for (idx, argument) in enumerate(arguments):
assert argument.type.isComplete()
if ((argument.type.isDictionary() and
argument.type.inner.canBeEmpty())or
(argument.type.isUnion() and
argument.type.unroll().hasPossiblyEmptyDictionaryType())):
# Optional dictionaries and unions containing optional
# dictionaries at the end of the list or followed by
# optional arguments must be optional.
if (not argument.optional and
all(arg.optional for arg in arguments[idx+1:])):
raise WebIDLError("Dictionary argument or union "
"argument containing a dictionary "
"not followed by a required argument "
"must be optional",
[argument.location])
# An argument cannot be a Nullable Dictionary
if argument.type.nullable():
raise WebIDLError("An argument cannot be a nullable "
"dictionary or nullable union "
"containing a dictionary",
[argument.location])
# Only the last argument can be variadic
if variadicArgument:
raise WebIDLError("Variadic argument is not last argument",
[variadicArgument.location])
if argument.variadic:
variadicArgument = argument
if returnType.isPromise():
overloadWithPromiseReturnType = overload
else:
overloadWithoutPromiseReturnType = overload
# Make sure either all our overloads return Promises or none do
if overloadWithPromiseReturnType and overloadWithoutPromiseReturnType:
raise WebIDLError("We have overloads with both Promise and "
"non-Promise return types",
[overloadWithPromiseReturnType.location,
overloadWithoutPromiseReturnType.location])
if overloadWithPromiseReturnType and self._legacycaller:
raise WebIDLError("May not have a Promise return type for a "
"legacycaller.",
[overloadWithPromiseReturnType.location])
if self.getExtendedAttribute("StaticClassOverride") and not \
(self.identifier.scope.isJSImplemented() and self.isStatic()):
raise WebIDLError("StaticClassOverride can be applied to static"
" methods on JS-implemented classes only.",
[self.location])
def overloadsForArgCount(self, argc):
return [overload for overload in self._overloads if
len(overload.arguments) == argc or
(len(overload.arguments) > argc and
all(arg.optional for arg in overload.arguments[argc:])) or
(len(overload.arguments) < argc and
len(overload.arguments) > 0 and
overload.arguments[-1].variadic)]
def signaturesForArgCount(self, argc):
return [(overload.returnType, overload.arguments) for overload
in self.overloadsForArgCount(argc)]
def locationsForArgCount(self, argc):
return [overload.location for overload in self.overloadsForArgCount(argc)]
    def distinguishingIndexForArgCount(self, argc):
        # Find the lowest argument index at which every pair of argc-argument
        # signatures has mutually distinguishable types (per WebIDL overload
        # resolution); throw if no such index exists.
        def isValidDistinguishingIndex(idx, signatures):
            # Compare every unordered pair of signatures at position idx.
            # A signature shorter than idx must end in a variadic, whose
            # type stands in for the missing position.
            for (firstSigIndex, (firstRetval, firstArgs)) in enumerate(signatures[:-1]):
                for (secondRetval, secondArgs) in signatures[firstSigIndex+1:]:
                    if idx < len(firstArgs):
                        firstType = firstArgs[idx].type
                    else:
                        assert(firstArgs[-1].variadic)
                        firstType = firstArgs[-1].type
                    if idx < len(secondArgs):
                        secondType = secondArgs[idx].type
                    else:
                        assert(secondArgs[-1].variadic)
                        secondType = secondArgs[-1].type
                    if not firstType.isDistinguishableFrom(secondType):
                        return False
            return True
        signatures = self.signaturesForArgCount(argc)
        for idx in range(argc):
            if isValidDistinguishingIndex(idx, signatures):
                return idx
        # No valid distinguishing index.  Time to throw
        locations = self.locationsForArgCount(argc)
        raise WebIDLError("Signatures with %d arguments for method '%s' are not "
                          "distinguishable" % (argc, self.identifier.name),
                          locations)
    def handleExtendedAttribute(self, attr):
        # Validate and apply one extended attribute to this method.
        # Attribute-only annotations are rejected, a few are interpreted
        # here, and a known set is deliberately passed through for later
        # consumers; anything unknown is an error.
        identifier = attr.identifier()
        if identifier == "GetterThrows":
            raise WebIDLError("Methods must not be flagged as "
                              "[GetterThrows]",
                              [attr.location, self.location])
        elif identifier == "SetterThrows":
            raise WebIDLError("Methods must not be flagged as "
                              "[SetterThrows]",
                              [attr.location, self.location])
        elif identifier == "Unforgeable":
            if self.isStatic():
                raise WebIDLError("[Unforgeable] is only allowed on non-static "
                                  "methods", [attr.location, self.location])
            self._unforgeable = True
        elif identifier == "SameObject":
            raise WebIDLError("Methods must not be flagged as [SameObject]",
                              [attr.location, self.location])
        elif identifier == "Constant":
            raise WebIDLError("Methods must not be flagged as [Constant]",
                              [attr.location, self.location])
        elif identifier == "PutForwards":
            raise WebIDLError("Only attributes support [PutForwards]",
                              [attr.location, self.location])
        elif identifier == "LenientFloat":
            # This is called before we've done overload resolution
            assert len(self.signatures()) == 1
            sig = self.signatures()[0]
            if not sig[0].isVoid():
                raise WebIDLError("[LenientFloat] used on a non-void method",
                                  [attr.location, self.location])
            if not any(arg.type.includesRestrictedFloat() for arg in sig[1]):
                raise WebIDLError("[LenientFloat] used on an operation with no "
                                  "restricted float type arguments",
                                  [attr.location, self.location])
        elif identifier == "Exposed":
            convertExposedAttrToGlobalNameSet(attr, self._exposureGlobalNames)
        elif (identifier == "CrossOriginCallable" or
              identifier == "WebGLHandlesContextLoss"):
            # Known no-argument attributes.
            if not attr.noArguments():
                raise WebIDLError("[%s] must take no arguments" % identifier,
                                  [attr.location])
        elif identifier == "Pure":
            if not attr.noArguments():
                raise WebIDLError("[Pure] must take no arguments",
                                  [attr.location])
            # [Pure] is shorthand for DependsOn=DOMState + Affects=Nothing.
            self._setDependsOn("DOMState")
            self._setAffects("Nothing")
        elif identifier == "Affects":
            if not attr.hasValue():
                raise WebIDLError("[Affects] takes an identifier",
                                  [attr.location])
            self._setAffects(attr.value())
        elif identifier == "DependsOn":
            if not attr.hasValue():
                raise WebIDLError("[DependsOn] takes an identifier",
                                  [attr.location])
            self._setDependsOn(attr.value())
        elif identifier == "Alias":
            if not attr.hasValue():
                raise WebIDLError("[Alias] takes an identifier or string",
                                  [attr.location])
            self._addAlias(attr.value())
        elif identifier == "UseCounter":
            if self.isSpecial():
                raise WebIDLError("[UseCounter] must not be used on a special "
                                  "operation",
                                  [attr.location, self.location])
        elif (identifier == "Throws" or
              identifier == "NewObject" or
              identifier == "ChromeOnly" or
              identifier == "UnsafeInPrerendering" or
              identifier == "Pref" or
              identifier == "Deprecated" or
              identifier == "Func" or
              identifier == "AvailableIn" or
              identifier == "CheckAnyPermissions" or
              identifier == "CheckAllPermissions" or
              identifier == "BinaryName" or
              identifier == "StaticClassOverride"):
            # Known attributes that we don't need to do anything with here
            pass
        else:
            raise WebIDLError("Unknown extended attribute %s on method" % identifier,
                              [attr.location])
        IDLInterfaceMember.handleExtendedAttribute(self, attr)
    def returnsPromise(self):
        # validate() guarantees all overloads agree on Promise-ness, so
        # checking the first overload suffices.
        return self._overloads[0].returnType.isPromise()
    def isUnforgeable(self):
        # Set by handleExtendedAttribute() for [Unforgeable].
        return self._unforgeable
def _getDependentObjects(self):
deps = set()
for overload in self._overloads:
deps.update(overload._getDependentObjects())
return deps
class IDLImplementsStatement(IDLObject):
    """Represents a WebIDL "Implementor implements Implementee;" statement."""
    def __init__(self, location, implementor, implementee):
        IDLObject.__init__(self, location)
        # Both sides start as IDLIdentifierPlaceholders; finish() replaces
        # them with the resolved IDLInterface objects.
        self.implementor = implementor
        self.implementee = implementee
        self._finished = False
    def finish(self, scope):
        # Resolve both placeholder names, require each side to be a
        # non-callback interface, then register the relationship on the
        # implementor.  Idempotent via self._finished.
        if self._finished:
            return
        assert(isinstance(self.implementor, IDLIdentifierPlaceholder))
        assert(isinstance(self.implementee, IDLIdentifierPlaceholder))
        implementor = self.implementor.finish(scope)
        implementee = self.implementee.finish(scope)
        # NOTE: we depend on not setting self.implementor and
        # self.implementee here to keep track of the original
        # locations.
        if not isinstance(implementor, IDLInterface):
            raise WebIDLError("Left-hand side of 'implements' is not an "
                              "interface",
                              [self.implementor.location])
        if implementor.isCallback():
            raise WebIDLError("Left-hand side of 'implements' is a callback "
                              "interface",
                              [self.implementor.location])
        if not isinstance(implementee, IDLInterface):
            raise WebIDLError("Right-hand side of 'implements' is not an "
                              "interface",
                              [self.implementee.location])
        if implementee.isCallback():
            raise WebIDLError("Right-hand side of 'implements' is a callback "
                              "interface",
                              [self.implementee.location])
        implementor.addImplementedInterface(implementee)
        self.implementor = implementor
        self.implementee = implementee
    def validate(self):
        # Nothing to validate beyond what finish() already checked.
        pass
    def addExtendedAttributes(self, attrs):
        # Extended attributes are not allowed on implements statements.
        assert len(attrs) == 0
class IDLExtendedAttribute(IDLObject):
    """
    A class to represent IDL extended attributes so we can give them locations
    """
    def __init__(self, location, tuple):
        # NOTE(review): the parameter name shadows the builtin "tuple";
        # kept for API compatibility.  Stored shapes are (name,),
        # (name, value), (name, args) or (name, value, args).
        IDLObject.__init__(self, location)
        self._tuple = tuple
    def identifier(self):
        # The attribute name is always the first element.
        return self._tuple[0]
    def noArguments(self):
        return len(self._tuple) == 1
    def hasValue(self):
        if len(self._tuple) < 2:
            return False
        return isinstance(self._tuple[1], str)
    def value(self):
        assert(self.hasValue())
        return self._tuple[1]
    def hasArgs(self):
        if len(self._tuple) == 3:
            return True
        return len(self._tuple) == 2 and isinstance(self._tuple[1], list)
    def args(self):
        assert(self.hasArgs())
        # Our args are our last element
        return self._tuple[-1]
    def listValue(self):
        """
        Backdoor for storing random data in _extendedAttrDict
        """
        return list(self._tuple[1:])
# Parser
class Tokenizer(object):
    """Lexer for Web IDL, built on PLY's lex module.

    PLY derives the lexer from this class: each t_* method's docstring is
    that token's regular expression, and token priority follows definition
    order — do not reorder the t_* methods or edit their docstrings lightly.
    """
    tokens = [
        "INTEGER",
        "FLOATLITERAL",
        "IDENTIFIER",
        "STRING",
        "WHITESPACE",
        "OTHER"
        ]

    def t_FLOATLITERAL(self, t):
        r'(-?(([0-9]+\.[0-9]*|[0-9]*\.[0-9]+)([Ee][+-]?[0-9]+)?|[0-9]+[Ee][+-]?[0-9]+|Infinity))|NaN'
        t.value = float(t.value)
        return t

    def t_INTEGER(self, t):
        r'-?(0([0-7]+|[Xx][0-9A-Fa-f]+)?|[1-9][0-9]*)'
        try:
            # Can't use int(), because that doesn't handle octal properly.
            t.value = parseInt(t.value)
        except:
            # Whatever parseInt raised becomes a parse error at this spot.
            raise WebIDLError("Invalid integer literal",
                              [Location(lexer=self.lexer,
                                        lineno=self.lexer.lineno,
                                        lexpos=self.lexer.lexpos,
                                        filename=self._filename)])
        return t

    def t_IDENTIFIER(self, t):
        r'[A-Z_a-z][0-9A-Z_a-z-]*'
        # Reclassify identifiers that are actually keywords.
        t.type = self.keywords.get(t.value, 'IDENTIFIER')
        return t

    def t_STRING(self, t):
        r'"[^"]*"'
        # Strip the surrounding quotes.
        t.value = t.value[1:-1]
        return t

    def t_WHITESPACE(self, t):
        r'[\t\n\r ]+|[\t\n\r ]*((//[^\n]*|/\*.*?\*/)[\t\n\r ]*)+'
        # Whitespace and comments are consumed and discarded.
        pass

    def t_ELLIPSIS(self, t):
        r'\.\.\.'
        t.type = self.keywords.get(t.value)
        return t

    def t_OTHER(self, t):
        r'[^\t\n\r 0-9A-Z_a-z]'
        # Punctuation: map to its keyword token if known, else OTHER.
        t.type = self.keywords.get(t.value, 'OTHER')
        return t

    keywords = {
        "module": "MODULE",
        "interface": "INTERFACE",
        "partial": "PARTIAL",
        "dictionary": "DICTIONARY",
        "exception": "EXCEPTION",
        "enum": "ENUM",
        "callback": "CALLBACK",
        "typedef": "TYPEDEF",
        "implements": "IMPLEMENTS",
        "const": "CONST",
        "null": "NULL",
        "true": "TRUE",
        "false": "FALSE",
        "serializer": "SERIALIZER",
        "stringifier": "STRINGIFIER",
        "jsonifier": "JSONIFIER",
        "unrestricted": "UNRESTRICTED",
        "attribute": "ATTRIBUTE",
        "readonly": "READONLY",
        "inherit": "INHERIT",
        "static": "STATIC",
        "getter": "GETTER",
        "setter": "SETTER",
        "creator": "CREATOR",
        "deleter": "DELETER",
        "legacycaller": "LEGACYCALLER",
        "optional": "OPTIONAL",
        "...": "ELLIPSIS",
        "::": "SCOPE",
        "Date": "DATE",
        "DOMString": "DOMSTRING",
        "ByteString": "BYTESTRING",
        "USVString": "USVSTRING",
        "any": "ANY",
        "boolean": "BOOLEAN",
        "byte": "BYTE",
        "double": "DOUBLE",
        "float": "FLOAT",
        "long": "LONG",
        "object": "OBJECT",
        "octet": "OCTET",
        "Promise": "PROMISE",
        "required": "REQUIRED",
        "sequence": "SEQUENCE",
        "MozMap": "MOZMAP",
        "short": "SHORT",
        "unsigned": "UNSIGNED",
        "void": "VOID",
        ":": "COLON",
        ";": "SEMICOLON",
        "{": "LBRACE",
        "}": "RBRACE",
        "(": "LPAREN",
        ")": "RPAREN",
        "[": "LBRACKET",
        "]": "RBRACKET",
        "?": "QUESTIONMARK",
        ",": "COMMA",
        "=": "EQUALS",
        "<": "LT",
        ">": "GT",
        "ArrayBuffer": "ARRAYBUFFER",
        "SharedArrayBuffer": "SHAREDARRAYBUFFER",
        "or": "OR",
        "maplike": "MAPLIKE",
        "setlike": "SETLIKE",
        "iterable": "ITERABLE"
        }

    tokens.extend(keywords.values())

    def t_error(self, t):
        # BUGFIX: was self.filename, but only self._filename is used/set
        # anywhere else (see t_INTEGER and Parser.getLocation), so the old
        # code would die with AttributeError instead of reporting the error.
        raise WebIDLError("Unrecognized Input",
                          [Location(lexer=self.lexer,
                                    lineno=self.lexer.lineno,
                                    lexpos=self.lexer.lexpos,
                                    filename=self._filename)])

    def __init__(self, outputdir, lexer=None):
        # Reuse a caller-provided lexer if given; otherwise build one from
        # this class's t_* rules.
        if lexer:
            self.lexer = lexer
        else:
            self.lexer = lex.lex(object=self,
                                 outputdir=outputdir,
                                 lextab='webidllex',
                                 reflags=re.DOTALL)
class SqueakyCleanLogger(object):
    """
    PLY logger that records every warning as an error, except for a fixed
    whitelist of diagnostics known to be benign for our grammar.
    """
    errorWhitelist = [
        # Web IDL defines the WHITESPACE token, but doesn't actually
        # use it ... so far.
        "Token 'WHITESPACE' defined, but not used",
        # And that means we have an unused token
        "There is 1 unused token",
        # Web IDL defines a OtherOrComma rule that's only used in
        # ExtendedAttributeInner, which we don't use yet.
        "Rule 'OtherOrComma' defined, but not used",
        # And an unused rule
        "There is 1 unused rule",
        # And the OtherOrComma grammar symbol is unreachable.
        "Symbol 'OtherOrComma' is unreachable",
        # Which means the Other symbol is unreachable.
        "Symbol 'Other' is unreachable",
        ]

    def __init__(self):
        self.errors = []

    def debug(self, msg, *args, **kwargs):
        # Debug/info chatter from PLY is uninteresting; swallow it.
        pass
    info = debug

    def warning(self, msg, *args, **kwargs):
        """Record the formatted message unless it is whitelisted."""
        if msg == "%s:%d: Rule '%s' defined, but not used":
            # Strip the filename/lineno so the whitelist doesn't need to
            # hardcode them.
            key = "Rule '%s' defined, but not used"
            keyArgs = args[2:]
        else:
            key = msg
            keyArgs = args
        if (key % keyArgs) not in self.errorWhitelist:
            self.errors.append(msg % args)
    error = warning

    def reportGrammarErrors(self):
        # Raise a single WebIDLError aggregating everything we collected.
        if not self.errors:
            return
        raise WebIDLError("\n".join(self.errors), [])
class Parser(Tokenizer):
    def getLocation(self, p, i):
        # Build a Location for symbol i of production p in the current file.
        return Location(self.lexer, p.lineno(i), p.lexpos(i), self._filename)
    def globalScope(self):
        # The top-level scope all definitions are registered in.
        return self._globalScope
# The p_Foo functions here must match the WebIDL spec's grammar.
# It's acceptable to split things at '|' boundaries.
    def p_Definitions(self, p):
        """
        Definitions : ExtendedAttributeList Definition Definitions
        """
        # A None definition (e.g. an ignored exception) contributes nothing
        # and must not carry extended attributes.
        if p[2]:
            p[0] = [p[2]]
            p[2].addExtendedAttributes(p[1])
        else:
            assert not p[1]
            p[0] = []
        p[0].extend(p[3])
    def p_DefinitionsEmpty(self, p):
        """
        Definitions :
        """
        # End of input: no more definitions.
        p[0] = []
    def p_Definition(self, p):
        """
        Definition : CallbackOrInterface
                   | PartialInterface
                   | Dictionary
                   | Exception
                   | Enum
                   | Typedef
                   | ImplementsStatement
        """
        # Pass the parsed definition object through.
        p[0] = p[1]
        assert p[1] # We might not have implemented something ...
    def p_CallbackOrInterfaceCallback(self, p):
        """
        CallbackOrInterface : CALLBACK CallbackRestOrInterface
        """
        # "callback interface Foo" marks the interface as a callback
        # interface; a plain callback function is already an IDLCallback.
        if p[2].isInterface():
            assert isinstance(p[2], IDLInterface)
            p[2].setCallback(True)
        p[0] = p[2]
    def p_CallbackOrInterfaceInterface(self, p):
        """
        CallbackOrInterface : Interface
        """
        # Plain interface: pass through unchanged.
        p[0] = p[1]
    def p_CallbackRestOrInterface(self, p):
        """
        CallbackRestOrInterface : CallbackRest
                                | Interface
        """
        # Either an IDLCallback or an interface; pass through.
        assert p[1]
        p[0] = p[1]
def p_Interface(self, p):
"""
Interface : INTERFACE IDENTIFIER Inheritance LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
members = p[5]
parent = p[3]
try:
existingObj = self.globalScope()._lookupIdentifier(identifier)
if existingObj:
p[0] = existingObj
if not isinstance(p[0], IDLInterface):
raise WebIDLError("Interface has the same name as "
"non-interface object",
[location, p[0].location])
p[0].setNonPartial(location, parent, members)
return
except Exception, ex:
if isinstance(ex, WebIDLError):
raise ex
pass
iface = IDLInterface(location, self.globalScope(), identifier, parent,
members, isKnownNonPartial=True)
p[0] = iface
def p_InterfaceForwardDecl(self, p):
"""
Interface : INTERFACE IDENTIFIER SEMICOLON
"""
location = self.getLocation(p, 1)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
try:
if self.globalScope()._lookupIdentifier(identifier):
p[0] = self.globalScope()._lookupIdentifier(identifier)
if not isinstance(p[0], IDLExternalInterface):
raise WebIDLError("Name collision between external "
"interface declaration for identifier "
"%s and %s" % (identifier.name, p[0]),
[location, p[0].location])
return
except Exception, ex:
if isinstance(ex, WebIDLError):
raise ex
pass
p[0] = IDLExternalInterface(location, self.globalScope(), identifier)
def p_PartialInterface(self, p):
"""
PartialInterface : PARTIAL INTERFACE IDENTIFIER LBRACE InterfaceMembers RBRACE SEMICOLON
"""
location = self.getLocation(p, 2)
identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
members = p[5]
nonPartialInterface = None
try:
nonPartialInterface = self.globalScope()._lookupIdentifier(identifier)
if nonPartialInterface:
if not isinstance(nonPartialInterface, IDLInterface):
raise WebIDLError("Partial interface has the same name as "
"non-interface object",
[location, nonPartialInterface.location])
except Exception, ex:
if isinstance(ex, WebIDLError):
raise ex
pass
if not nonPartialInterface:
nonPartialInterface = IDLInterface(location, self.globalScope(),
identifier, None,
[], isKnownNonPartial=False)
partialInterface = IDLPartialInterface(location, identifier, members,
nonPartialInterface)
p[0] = partialInterface
    def p_Inheritance(self, p):
        """
        Inheritance : COLON ScopedName
        """
        # Parent is recorded as a placeholder and resolved later.
        p[0] = IDLIdentifierPlaceholder(self.getLocation(p, 2), p[2])
    def p_InheritanceEmpty(self, p):
        """
        Inheritance :
        """
        # No parent: p[0] stays None.
        pass
    def p_InterfaceMembers(self, p):
        """
        InterfaceMembers : ExtendedAttributeList InterfaceMember InterfaceMembers
        """
        # Extended attributes may only appear when a member is present.
        p[0] = [p[2]] if p[2] else []
        assert not p[1] or p[2]
        p[2].addExtendedAttributes(p[1])
        p[0].extend(p[3])
    def p_InterfaceMembersEmpty(self, p):
        """
        InterfaceMembers :
        """
        # End of the member list.
        p[0] = []
    def p_InterfaceMember(self, p):
        """
        InterfaceMember : Const
                        | AttributeOrOperationOrMaplikeOrSetlikeOrIterable
        """
        # Pass the parsed member through.
        p[0] = p[1]
    def p_Dictionary(self, p):
        """
        Dictionary : DICTIONARY IDENTIFIER Inheritance LBRACE DictionaryMembers RBRACE SEMICOLON
        """
        # p[3] is the (possibly None) parent placeholder from Inheritance.
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        members = p[5]
        p[0] = IDLDictionary(location, self.globalScope(), identifier, p[3], members)
    def p_DictionaryMembers(self, p):
        """
        DictionaryMembers : ExtendedAttributeList DictionaryMember DictionaryMembers
                          |
        """
        # Both productions are handled here; len(p) distinguishes them.
        if len(p) == 1:
            # We're at the end of the list
            p[0] = []
            return
        # Add our extended attributes
        p[2].addExtendedAttributes(p[1])
        p[0] = [p[2]]
        p[0].extend(p[3])
    def p_DictionaryMember(self, p):
        """
        DictionaryMember : Required Type IDENTIFIER Default SEMICOLON
        """
        # These quack a lot like optional arguments, so just treat them that way.
        t = p[2]
        assert isinstance(t, IDLType)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        defaultValue = p[4]
        # "required" (p[1] is the ReadOnly-style boolean) inverts optionality.
        optional = not p[1]
        if not optional and defaultValue:
            raise WebIDLError("Required dictionary members can't have a default value.",
                              [self.getLocation(p, 4)])
        p[0] = IDLArgument(self.getLocation(p, 3), identifier, t,
                           optional=optional,
                           defaultValue=defaultValue, variadic=False,
                           dictionaryMember=True)
    def p_Default(self, p):
        """
        Default : EQUALS DefaultValue
                |
        """
        # None when no "= value" clause is present.
        if len(p) > 1:
            p[0] = p[2]
        else:
            p[0] = None
    def p_DefaultValue(self, p):
        """
        DefaultValue : ConstValue
                     | LBRACKET RBRACKET
        """
        # "[]" means a default empty sequence; anything else is a constant.
        if len(p) == 2:
            p[0] = p[1]
        else:
            assert len(p) == 3  # Must be []
            p[0] = IDLEmptySequenceValue(self.getLocation(p, 1))
    def p_Exception(self, p):
        """
        Exception : EXCEPTION IDENTIFIER Inheritance LBRACE ExceptionMembers RBRACE SEMICOLON
        """
        # Exceptions are consumed but not modeled; p[0] stays None and
        # p_Definitions drops them.
        pass
    def p_Enum(self, p):
        """
        Enum : ENUM IDENTIFIER LBRACE EnumValueList RBRACE SEMICOLON
        """
        # The grammar guarantees at least one value string.
        location = self.getLocation(p, 1)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 2), p[2])
        values = p[4]
        assert values
        p[0] = IDLEnum(location, self.globalScope(), identifier, values)
    def p_EnumValueList(self, p):
        """
        EnumValueList : STRING EnumValueListComma
        """
        # First value plus whatever the comma-tail produced.
        p[0] = [p[1]]
        p[0].extend(p[2])
    def p_EnumValueListComma(self, p):
        """
        EnumValueListComma : COMMA EnumValueListString
        """
        # A comma may be followed by more values (or nothing: trailing comma).
        p[0] = p[2]
    def p_EnumValueListCommaEmpty(self, p):
        """
        EnumValueListComma :
        """
        # No further values.
        p[0] = []
    def p_EnumValueListString(self, p):
        """
        EnumValueListString : STRING EnumValueListComma
        """
        # One more value plus the rest of the tail.
        p[0] = [p[1]]
        p[0].extend(p[2])
    def p_EnumValueListStringEmpty(self, p):
        """
        EnumValueListString :
        """
        # Trailing comma case: nothing follows.
        p[0] = []
    def p_CallbackRest(self, p):
        """
        CallbackRest : IDENTIFIER EQUALS ReturnType LPAREN ArgumentList RPAREN SEMICOLON
        """
        # "callback Name = RetType (args);" — build the IDLCallback.
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
        p[0] = IDLCallback(self.getLocation(p, 1), self.globalScope(),
                           identifier, p[3], p[5])
    def p_ExceptionMembers(self, p):
        """
        ExceptionMembers : ExtendedAttributeList ExceptionMember ExceptionMembers
                         |
        """
        # Exception bodies are consumed and ignored (see p_Exception).
        pass
    def p_Typedef(self, p):
        """
        Typedef : TYPEDEF Type IDENTIFIER SEMICOLON
        """
        # Registers the alias in the global scope via IDLTypedef's ctor.
        typedef = IDLTypedef(self.getLocation(p, 1), self.globalScope(),
                             p[2], p[3])
        p[0] = typedef
    def p_ImplementsStatement(self, p):
        """
        ImplementsStatement : ScopedName IMPLEMENTS ScopedName SEMICOLON
        """
        # Both sides stay placeholders until IDLImplementsStatement.finish().
        assert(p[2] == "implements")
        implementor = IDLIdentifierPlaceholder(self.getLocation(p, 1), p[1])
        implementee = IDLIdentifierPlaceholder(self.getLocation(p, 3), p[3])
        p[0] = IDLImplementsStatement(self.getLocation(p, 1), implementor,
                                      implementee)
    def p_Const(self, p):
        """
        Const : CONST ConstType IDENTIFIER EQUALS ConstValue SEMICOLON
        """
        # Builds an IDLConst member from type, name and value.
        location = self.getLocation(p, 1)
        type = p[2]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 3), p[3])
        value = p[5]
        p[0] = IDLConst(location, identifier, type, value)
    def p_ConstValueBoolean(self, p):
        """
        ConstValue : BooleanLiteral
        """
        # Wrap the Python bool in an IDLValue of boolean type.
        location = self.getLocation(p, 1)
        booleanType = BuiltinTypes[IDLBuiltinType.Types.boolean]
        p[0] = IDLValue(location, booleanType, p[1])
    def p_ConstValueInteger(self, p):
        """
        ConstValue : INTEGER
        """
        location = self.getLocation(p, 1)
        # We don't know ahead of time what type the integer literal is.
        # Determine the smallest type it could possibly fit in and use that.
        integerType = matchIntegerValueToType(p[1])
        if integerType is None:
            raise WebIDLError("Integer literal out of range", [location])
        p[0] = IDLValue(location, integerType, p[1])
    def p_ConstValueFloat(self, p):
        """
        ConstValue : FLOATLITERAL
        """
        # Float literals start life as unrestricted float.
        location = self.getLocation(p, 1)
        p[0] = IDLValue(location, BuiltinTypes[IDLBuiltinType.Types.unrestricted_float], p[1])
    def p_ConstValueString(self, p):
        """
        ConstValue : STRING
        """
        # String constants are typed as DOMString.
        location = self.getLocation(p, 1)
        stringType = BuiltinTypes[IDLBuiltinType.Types.domstring]
        p[0] = IDLValue(location, stringType, p[1])
    def p_ConstValueNull(self, p):
        """
        ConstValue : NULL
        """
        # "null" gets its own value class.
        p[0] = IDLNullValue(self.getLocation(p, 1))
    def p_BooleanLiteralTrue(self, p):
        """
        BooleanLiteral : TRUE
        """
        # Plain Python bool; wrapped by p_ConstValueBoolean.
        p[0] = True
    def p_BooleanLiteralFalse(self, p):
        """
        BooleanLiteral : FALSE
        """
        # Plain Python bool; wrapped by p_ConstValueBoolean.
        p[0] = False
    def p_AttributeOrOperationOrMaplikeOrSetlikeOrIterable(self, p):
        """
        AttributeOrOperationOrMaplikeOrSetlikeOrIterable : Attribute
                                                         | Maplike
                                                         | Setlike
                                                         | Iterable
                                                         | Operation
        """
        # Pass the parsed interface member through.
        p[0] = p[1]
    def p_Iterable(self, p):
        """
        Iterable : ITERABLE LT Type GT SEMICOLON
                 | ITERABLE LT Type COMMA Type GT SEMICOLON
        """
        # One-type form is a value iterator (keyType None); two-type form is
        # a pair iterator with explicit key and value types.
        location = self.getLocation(p, 2)
        identifier = IDLUnresolvedIdentifier(location, "__iterable",
                                             allowDoubleUnderscore=True)
        if (len(p) > 6):
            keyType = p[3]
            valueType = p[5]
        else:
            keyType = None
            valueType = p[3]
        p[0] = IDLIterable(location, identifier, keyType, valueType, self.globalScope())
    def p_Setlike(self, p):
        """
        Setlike : ReadOnly SETLIKE LT Type GT SEMICOLON
        """
        # For setlike the single declared type serves as both key and value.
        readonly = p[1]
        maplikeOrSetlikeType = p[2]
        location = self.getLocation(p, 2)
        identifier = IDLUnresolvedIdentifier(location, "__setlike",
                                             allowDoubleUnderscore=True)
        keyType = p[4]
        valueType = keyType
        p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
                                   readonly, keyType, valueType)
    def p_Maplike(self, p):
        """
        Maplike : ReadOnly MAPLIKE LT Type COMMA Type GT SEMICOLON
        """
        # maplike declares distinct key and value types.
        readonly = p[1]
        maplikeOrSetlikeType = p[2]
        location = self.getLocation(p, 2)
        identifier = IDLUnresolvedIdentifier(location, "__maplike",
                                             allowDoubleUnderscore=True)
        keyType = p[4]
        valueType = p[6]
        p[0] = IDLMaplikeOrSetlike(location, identifier, maplikeOrSetlikeType,
                                   readonly, keyType, valueType)
    def p_AttributeWithQualifier(self, p):
        """
        Attribute : Qualifier AttributeRest
        """
        # Qualifier is a one-element list: Static or Stringifier.
        static = IDLInterfaceMember.Special.Static in p[1]
        stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
        (location, identifier, type, readonly) = p[2]
        p[0] = IDLAttribute(location, identifier, type, readonly,
                            static=static, stringifier=stringifier)
    def p_AttributeInherited(self, p):
        """
        Attribute : INHERIT AttributeRest
        """
        # "inherit attribute ..." form.
        (location, identifier, type, readonly) = p[2]
        p[0] = IDLAttribute(location, identifier, type, readonly, inherit=True)
    def p_Attribute(self, p):
        """
        Attribute : AttributeRest
        """
        # Unqualified attribute.
        (location, identifier, type, readonly) = p[1]
        p[0] = IDLAttribute(location, identifier, type, readonly, inherit=False)
    def p_AttributeRest(self, p):
        """
        AttributeRest : ReadOnly ATTRIBUTE Type AttributeName SEMICOLON
        """
        # Returns a tuple; the various Attribute productions build the
        # IDLAttribute from it.
        location = self.getLocation(p, 2)
        readonly = p[1]
        t = p[3]
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 4), p[4])
        p[0] = (location, identifier, t, readonly)
    def p_ReadOnly(self, p):
        """
        ReadOnly : READONLY
        """
        # "readonly" keyword present.
        p[0] = True
    def p_ReadOnlyEmpty(self, p):
        """
        ReadOnly :
        """
        # "readonly" keyword absent.
        p[0] = False
    def p_Operation(self, p):
        """
        Operation : Qualifiers OperationRest
        """
        # Validates the qualifier set (static/stringifier stand alone;
        # getter/setter/creator/deleter have arity and DOMString/unsigned-long
        # argument constraints), then builds the IDLMethod, synthesizing a
        # "__"-prefixed identifier for special operations with no name.
        qualifiers = p[1]
        # Disallow duplicates in the qualifier set
        if not len(set(qualifiers)) == len(qualifiers):
            raise WebIDLError("Duplicate qualifiers are not allowed",
                              [self.getLocation(p, 1)])
        static = IDLInterfaceMember.Special.Static in p[1]
        # If static is there that's all that's allowed.  This is disallowed
        # by the parser, so we can assert here.
        assert not static or len(qualifiers) == 1
        stringifier = IDLInterfaceMember.Special.Stringifier in p[1]
        # If stringifier is there that's all that's allowed.  This is disallowed
        # by the parser, so we can assert here.
        assert not stringifier or len(qualifiers) == 1
        getter = True if IDLMethod.Special.Getter in p[1] else False
        setter = True if IDLMethod.Special.Setter in p[1] else False
        creator = True if IDLMethod.Special.Creator in p[1] else False
        deleter = True if IDLMethod.Special.Deleter in p[1] else False
        legacycaller = True if IDLMethod.Special.LegacyCaller in p[1] else False
        if getter or deleter:
            if setter or creator:
                raise WebIDLError("getter and deleter are incompatible with setter and creator",
                                  [self.getLocation(p, 1)])
        (returnType, identifier, arguments) = p[2]
        assert isinstance(returnType, IDLType)
        specialType = IDLMethod.NamedOrIndexed.Neither
        if getter or deleter:
            # Getters/deleters take exactly one DOMString (named) or
            # unsigned long (indexed) argument.
            if len(arguments) != 1:
                raise WebIDLError("%s has wrong number of arguments" %
                                  ("getter" if getter else "deleter"),
                                  [self.getLocation(p, 2)])
            argType = arguments[0].type
            if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
                specialType = IDLMethod.NamedOrIndexed.Named
            elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
                specialType = IDLMethod.NamedOrIndexed.Indexed
            else:
                raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                                  ("getter" if getter else "deleter"),
                                  [arguments[0].location])
            if arguments[0].optional or arguments[0].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("getter" if getter else "deleter",
                                   "optional" if arguments[0].optional else "variadic"),
                                  [arguments[0].location])
        if getter:
            if returnType.isVoid():
                raise WebIDLError("getter cannot have void return type",
                                  [self.getLocation(p, 2)])
        if setter or creator:
            # Setters/creators take the key argument plus the value argument;
            # neither may be optional or variadic.
            if len(arguments) != 2:
                raise WebIDLError("%s has wrong number of arguments" %
                                  ("setter" if setter else "creator"),
                                  [self.getLocation(p, 2)])
            argType = arguments[0].type
            if argType == BuiltinTypes[IDLBuiltinType.Types.domstring]:
                specialType = IDLMethod.NamedOrIndexed.Named
            elif argType == BuiltinTypes[IDLBuiltinType.Types.unsigned_long]:
                specialType = IDLMethod.NamedOrIndexed.Indexed
            else:
                raise WebIDLError("%s has wrong argument type (must be DOMString or UnsignedLong)" %
                                  ("setter" if setter else "creator"),
                                  [arguments[0].location])
            if arguments[0].optional or arguments[0].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("setter" if setter else "creator",
                                   "optional" if arguments[0].optional else "variadic"),
                                  [arguments[0].location])
            if arguments[1].optional or arguments[1].variadic:
                raise WebIDLError("%s cannot have %s argument" %
                                  ("setter" if setter else "creator",
                                   "optional" if arguments[1].optional else "variadic"),
                                  [arguments[1].location])
        if stringifier:
            if len(arguments) != 0:
                raise WebIDLError("stringifier has wrong number of arguments",
                                  [self.getLocation(p, 2)])
            if not returnType.isDOMString():
                raise WebIDLError("stringifier must have DOMString return type",
                                  [self.getLocation(p, 2)])
        # identifier might be None.  This is only permitted for special methods.
        if not identifier:
            if (not getter and not setter and not creator and
                not deleter and not legacycaller and not stringifier):
                raise WebIDLError("Identifier required for non-special methods",
                                  [self.getLocation(p, 2)])
            location = BuiltinLocation("<auto-generated-identifier>")
            identifier = IDLUnresolvedIdentifier(
                location,
                "__%s%s%s%s%s%s%s" %
                ("named" if specialType == IDLMethod.NamedOrIndexed.Named else
                 "indexed" if specialType == IDLMethod.NamedOrIndexed.Indexed else "",
                 "getter" if getter else "",
                 "setter" if setter else "",
                 "deleter" if deleter else "",
                 "creator" if creator else "",
                 "legacycaller" if legacycaller else "",
                 "stringifier" if stringifier else ""),
                allowDoubleUnderscore=True)
        method = IDLMethod(self.getLocation(p, 2), identifier, returnType, arguments,
                           static=static, getter=getter, setter=setter, creator=creator,
                           deleter=deleter, specialType=specialType,
                           legacycaller=legacycaller, stringifier=stringifier)
        p[0] = method
    def p_Stringifier(self, p):
        """
        Operation : STRINGIFIER SEMICOLON
        """
        # Bare "stringifier;" — synthesize a __stringifier method returning
        # DOMString with no arguments.
        identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
                                             "__stringifier",
                                             allowDoubleUnderscore=True)
        method = IDLMethod(self.getLocation(p, 1),
                           identifier,
                           returnType=BuiltinTypes[IDLBuiltinType.Types.domstring],
                           arguments=[],
                           stringifier=True)
        p[0] = method
    def p_Jsonifier(self, p):
        """
        Operation : JSONIFIER SEMICOLON
        """
        # Bare "jsonifier;" — synthesize a __jsonifier method returning
        # object with no arguments.
        identifier = IDLUnresolvedIdentifier(BuiltinLocation("<auto-generated-identifier>"),
                                             "__jsonifier", allowDoubleUnderscore=True)
        method = IDLMethod(self.getLocation(p, 1),
                           identifier,
                           returnType=BuiltinTypes[IDLBuiltinType.Types.object],
                           arguments=[],
                           jsonifier=True)
        p[0] = method
    def p_QualifierStatic(self, p):
        """
        Qualifier : STATIC
        """
        # One-element qualifier list.
        p[0] = [IDLInterfaceMember.Special.Static]
    def p_QualifierStringifier(self, p):
        """
        Qualifier : STRINGIFIER
        """
        # One-element qualifier list.
        p[0] = [IDLInterfaceMember.Special.Stringifier]
    def p_Qualifiers(self, p):
        """
        Qualifiers : Qualifier
                   | Specials
        """
        # Either [Static]/[Stringifier] or a (possibly empty) Specials list.
        p[0] = p[1]
    def p_Specials(self, p):
        """
        Specials : Special Specials
        """
        # Accumulate special-operation keywords in order.
        p[0] = [p[1]]
        p[0].extend(p[2])
def p_SpecialsEmpty(self, p):
"""
Specials :
"""
p[0] = []
def p_SpecialGetter(self, p):
"""
Special : GETTER
"""
p[0] = IDLMethod.Special.Getter
def p_SpecialSetter(self, p):
"""
Special : SETTER
"""
p[0] = IDLMethod.Special.Setter
def p_SpecialCreator(self, p):
"""
Special : CREATOR
"""
p[0] = IDLMethod.Special.Creator
def p_SpecialDeleter(self, p):
"""
Special : DELETER
"""
p[0] = IDLMethod.Special.Deleter
def p_SpecialLegacyCaller(self, p):
"""
Special : LEGACYCALLER
"""
p[0] = IDLMethod.Special.LegacyCaller
    # OperationRest yields a (returnType, identifier-or-None, arguments)
    # triple; the identifier is None for special operations that get an
    # auto-generated name. ArgumentList/Arguments flatten the comma-separated
    # Argument productions, skipping None entries. (Docstrings are PLY
    # grammar rules; do not edit.)
    def p_OperationRest(self, p):
        """
        OperationRest : ReturnType OptionalIdentifier LPAREN ArgumentList RPAREN SEMICOLON
        """
        p[0] = (p[1], p[2], p[4])
    def p_OptionalIdentifier(self, p):
        """
        OptionalIdentifier : IDENTIFIER
        """
        p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
    def p_OptionalIdentifierEmpty(self, p):
        """
        OptionalIdentifier :
        """
        # p[0] is deliberately left as None when no identifier is present.
        pass
    def p_ArgumentList(self, p):
        """
        ArgumentList : Argument Arguments
        """
        p[0] = [p[1]] if p[1] else []
        p[0].extend(p[2])
    def p_ArgumentListEmpty(self, p):
        """
        ArgumentList :
        """
        p[0] = []
    def p_Arguments(self, p):
        """
        Arguments : COMMA Argument Arguments
        """
        p[0] = [p[2]] if p[2] else []
        p[0].extend(p[3])
    def p_ArgumentsEmpty(self, p):
        """
        Arguments :
        """
        p[0] = []
    # Builds one IDLArgument. Enforces two WebIDL constraints here rather
    # than later: a mandatory argument may not carry a default value, and a
    # variadic argument may not also be written "optional" (variadic implies
    # optional, which is applied below). (Docstring is the PLY grammar rule.)
    def p_Argument(self, p):
        """
        Argument : ExtendedAttributeList Optional Type Ellipsis ArgumentName Default
        """
        t = p[3]
        assert isinstance(t, IDLType)
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 5), p[5])
        optional = p[2]
        variadic = p[4]
        defaultValue = p[6]
        if not optional and defaultValue:
            raise WebIDLError("Mandatory arguments can't have a default value.",
                              [self.getLocation(p, 6)])
        # We can't test t.isAny() here and give it a default value as needed,
        # since at this point t is not a fully resolved type yet (e.g. it might
        # be a typedef). We'll handle the 'any' case in IDLArgument.complete.
        if variadic:
            if optional:
                raise WebIDLError("Variadic arguments should not be marked optional.",
                                  [self.getLocation(p, 2)])
            optional = variadic
        p[0] = IDLArgument(self.getLocation(p, 5), identifier, t, optional, defaultValue, variadic)
        p[0].addExtendedAttributes(p[1])
    # ArgumentName allows most WebIDL keywords to double as argument
    # identifiers (the spec's argument-name-keyword rule); AttributeName does
    # the same for attribute identifiers. The Optional/Required/Ellipsis
    # pairs are boolean flag productions: present -> True, absent -> False.
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_ArgumentName(self, p):
        """
        ArgumentName : IDENTIFIER
                     | ATTRIBUTE
                     | CALLBACK
                     | CONST
                     | CREATOR
                     | DELETER
                     | DICTIONARY
                     | ENUM
                     | EXCEPTION
                     | GETTER
                     | IMPLEMENTS
                     | INHERIT
                     | INTERFACE
                     | ITERABLE
                     | LEGACYCALLER
                     | MAPLIKE
                     | PARTIAL
                     | REQUIRED
                     | SERIALIZER
                     | SETLIKE
                     | SETTER
                     | STATIC
                     | STRINGIFIER
                     | JSONIFIER
                     | TYPEDEF
                     | UNRESTRICTED
        """
        p[0] = p[1]
    def p_AttributeName(self, p):
        """
        AttributeName : IDENTIFIER
                      | REQUIRED
        """
        p[0] = p[1]
    def p_Optional(self, p):
        """
        Optional : OPTIONAL
        """
        p[0] = True
    def p_OptionalEmpty(self, p):
        """
        Optional :
        """
        p[0] = False
    def p_Required(self, p):
        """
        Required : REQUIRED
        """
        p[0] = True
    def p_RequiredEmpty(self, p):
        """
        Required :
        """
        p[0] = False
    def p_Ellipsis(self, p):
        """
        Ellipsis : ELLIPSIS
        """
        p[0] = True
    def p_EllipsisEmpty(self, p):
        """
        Ellipsis :
        """
        p[0] = False
    # Exception members are parsed but discarded (legacy syntax). The
    # ExtendedAttributeList productions build a flat list of
    # IDLExtendedAttribute objects from "[A, B(...), ...]" prefixes.
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_ExceptionMember(self, p):
        """
        ExceptionMember : Const
                        | ExceptionField
        """
        pass
    def p_ExceptionField(self, p):
        """
        ExceptionField : Type IDENTIFIER SEMICOLON
        """
        pass
    def p_ExtendedAttributeList(self, p):
        """
        ExtendedAttributeList : LBRACKET ExtendedAttribute ExtendedAttributes RBRACKET
        """
        p[0] = [p[2]]
        if p[3]:
            p[0].extend(p[3])
    def p_ExtendedAttributeListEmpty(self, p):
        """
        ExtendedAttributeList :
        """
        p[0] = []
    def p_ExtendedAttribute(self, p):
        """
        ExtendedAttribute : ExtendedAttributeNoArgs
                          | ExtendedAttributeArgList
                          | ExtendedAttributeIdent
                          | ExtendedAttributeNamedArgList
                          | ExtendedAttributeIdentList
        """
        p[0] = IDLExtendedAttribute(self.getLocation(p, 1), p[1])
    def p_ExtendedAttributeEmpty(self, p):
        """
        ExtendedAttribute :
        """
        pass
    def p_ExtendedAttributes(self, p):
        """
        ExtendedAttributes : COMMA ExtendedAttribute ExtendedAttributes
        """
        p[0] = [p[2]] if p[2] else []
        p[0].extend(p[3])
    def p_ExtendedAttributesEmpty(self, p):
        """
        ExtendedAttributes :
        """
        p[0] = []
    # "Other" matches nearly any single token; it exists so that unknown
    # extended-attribute bodies can be consumed without a parse error. The
    # values are intentionally discarded. (Docstrings are PLY grammar rules;
    # do not edit.)
    def p_Other(self, p):
        """
        Other : INTEGER
              | FLOATLITERAL
              | IDENTIFIER
              | STRING
              | OTHER
              | ELLIPSIS
              | COLON
              | SCOPE
              | SEMICOLON
              | LT
              | EQUALS
              | GT
              | QUESTIONMARK
              | DATE
              | DOMSTRING
              | BYTESTRING
              | USVSTRING
              | ANY
              | ATTRIBUTE
              | BOOLEAN
              | BYTE
              | LEGACYCALLER
              | CONST
              | CREATOR
              | DELETER
              | DOUBLE
              | EXCEPTION
              | FALSE
              | FLOAT
              | GETTER
              | IMPLEMENTS
              | INHERIT
              | INTERFACE
              | LONG
              | MODULE
              | NULL
              | OBJECT
              | OCTET
              | OPTIONAL
              | SEQUENCE
              | MOZMAP
              | SETTER
              | SHORT
              | STATIC
              | STRINGIFIER
              | JSONIFIER
              | TRUE
              | TYPEDEF
              | UNSIGNED
              | VOID
        """
        pass
    def p_OtherOrComma(self, p):
        """
        OtherOrComma : Other
                     | COMMA
        """
        pass
    # Type productions: a Type is either a SingleType or a parenthesized
    # union "(A or B or ...)" with optional suffix modifiers ([], ?).
    # handleModifiers applies those suffixes to the built IDLType.
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_TypeSingleType(self, p):
        """
        Type : SingleType
        """
        p[0] = p[1]
    def p_TypeUnionType(self, p):
        """
        Type : UnionType TypeSuffix
        """
        p[0] = self.handleModifiers(p[1], p[2])
    def p_SingleTypeNonAnyType(self, p):
        """
        SingleType : NonAnyType
        """
        p[0] = p[1]
    def p_SingleTypeAnyType(self, p):
        """
        SingleType : ANY TypeSuffixStartingWithArray
        """
        p[0] = self.handleModifiers(BuiltinTypes[IDLBuiltinType.Types.any], p[2])
    # A union requires at least two member types; extras are collected by
    # UnionMemberTypes.
    def p_UnionType(self, p):
        """
        UnionType : LPAREN UnionMemberType OR UnionMemberType UnionMemberTypes RPAREN
        """
        types = [p[2], p[4]]
        types.extend(p[5])
        p[0] = IDLUnionType(self.getLocation(p, 1), types)
    def p_UnionMemberTypeNonAnyType(self, p):
        """
        UnionMemberType : NonAnyType
        """
        p[0] = p[1]
    # "any[]" is only legal as a union member, hence the dedicated rule.
    def p_UnionMemberTypeArrayOfAny(self, p):
        """
        UnionMemberTypeArrayOfAny : ANY LBRACKET RBRACKET
        """
        p[0] = IDLArrayType(self.getLocation(p, 2),
                            BuiltinTypes[IDLBuiltinType.Types.any])
    def p_UnionMemberType(self, p):
        """
        UnionMemberType : UnionType TypeSuffix
                        | UnionMemberTypeArrayOfAny TypeSuffix
        """
        p[0] = self.handleModifiers(p[1], p[2])
    def p_UnionMemberTypes(self, p):
        """
        UnionMemberTypes : OR UnionMemberType UnionMemberTypes
        """
        p[0] = [p[2]]
        p[0].extend(p[3])
    def p_UnionMemberTypesEmpty(self, p):
        """
        UnionMemberTypes :
        """
        p[0] = []
    # NonAnyType covers everything except bare "any": primitives/strings,
    # buffer types, object, and the parameterized sequence/Promise/MozMap
    # forms. A trailing "?" (Null) wraps the result in IDLNullableType.
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_NonAnyType(self, p):
        """
        NonAnyType : PrimitiveOrStringType TypeSuffix
                   | ARRAYBUFFER TypeSuffix
                   | SHAREDARRAYBUFFER TypeSuffix
                   | OBJECT TypeSuffix
        """
        if p[1] == "object":
            type = BuiltinTypes[IDLBuiltinType.Types.object]
        elif p[1] == "ArrayBuffer":
            type = BuiltinTypes[IDLBuiltinType.Types.ArrayBuffer]
        elif p[1] == "SharedArrayBuffer":
            type = BuiltinTypes[IDLBuiltinType.Types.SharedArrayBuffer]
        else:
            # PrimitiveOrStringType productions yield an
            # IDLBuiltinType.Types enum value directly.
            type = BuiltinTypes[p[1]]
        p[0] = self.handleModifiers(type, p[2])
    def p_NonAnyTypeSequenceType(self, p):
        """
        NonAnyType : SEQUENCE LT Type GT Null
        """
        innerType = p[3]
        type = IDLSequenceType(self.getLocation(p, 1), innerType)
        if p[5]:
            type = IDLNullableType(self.getLocation(p, 5), type)
        p[0] = type
    # Note: Promise<void> is allowed, so we want to parametrize on
    # ReturnType, not Type.  Also, we want this to end up picking up
    # the Promise interface for now, hence the games with IDLUnresolvedType.
    def p_NonAnyTypePromiseType(self, p):
        """
        NonAnyType : PROMISE LT ReturnType GT Null
        """
        # NOTE(review): innerType is unused; p[3] is passed directly below.
        innerType = p[3]
        promiseIdent = IDLUnresolvedIdentifier(self.getLocation(p, 1), "Promise")
        type = IDLUnresolvedType(self.getLocation(p, 1), promiseIdent, p[3])
        if p[5]:
            type = IDLNullableType(self.getLocation(p, 5), type)
        p[0] = type
    def p_NonAnyTypeMozMapType(self, p):
        """
        NonAnyType : MOZMAP LT Type GT Null
        """
        innerType = p[3]
        type = IDLMozMapType(self.getLocation(p, 1), innerType)
        if p[5]:
            type = IDLNullableType(self.getLocation(p, 5), type)
        p[0] = type
def p_NonAnyTypeScopedName(self, p):
"""
NonAnyType : ScopedName TypeSuffix
"""
assert isinstance(p[1], IDLUnresolvedIdentifier)
if p[1].name == "Promise":
raise WebIDLError("Promise used without saying what it's "
"parametrized over",
[self.getLocation(p, 1)])
type = None
try:
if self.globalScope()._lookupIdentifier(p[1]):
obj = self.globalScope()._lookupIdentifier(p[1])
assert not obj.isType()
if obj.isTypedef():
type = IDLTypedefType(self.getLocation(p, 1), obj.innerType,
obj.identifier.name)
elif obj.isCallback() and not obj.isInterface():
type = IDLCallbackType(self.getLocation(p, 1), obj)
else:
type = IDLWrapperType(self.getLocation(p, 1), p[1])
p[0] = self.handleModifiers(type, p[2])
return
except:
pass
type = IDLUnresolvedType(self.getLocation(p, 1), p[1])
p[0] = self.handleModifiers(type, p[2])
    # Date type and the restricted type grammar used for "const" members
    # (primitives/strings plus identifiers, with optional "?").
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_NonAnyTypeDate(self, p):
        """
        NonAnyType : DATE TypeSuffix
        """
        p[0] = self.handleModifiers(BuiltinTypes[IDLBuiltinType.Types.date],
                                    p[2])
    def p_ConstType(self, p):
        """
        ConstType : PrimitiveOrStringType Null
        """
        type = BuiltinTypes[p[1]]
        if p[2]:
            type = IDLNullableType(self.getLocation(p, 1), type)
        p[0] = type
    def p_ConstTypeIdentifier(self, p):
        """
        ConstType : IDENTIFIER Null
        """
        identifier = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
        type = IDLUnresolvedType(self.getLocation(p, 1), identifier)
        if p[2]:
            type = IDLNullableType(self.getLocation(p, 1), type)
        p[0] = type
    # Leaf productions mapping primitive/string type keywords onto
    # IDLBuiltinType.Types enum values, plus the (unsigned) integer grammar.
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_PrimitiveOrStringTypeUint(self, p):
        """
        PrimitiveOrStringType : UnsignedIntegerType
        """
        p[0] = p[1]
    def p_PrimitiveOrStringTypeBoolean(self, p):
        """
        PrimitiveOrStringType : BOOLEAN
        """
        p[0] = IDLBuiltinType.Types.boolean
    def p_PrimitiveOrStringTypeByte(self, p):
        """
        PrimitiveOrStringType : BYTE
        """
        p[0] = IDLBuiltinType.Types.byte
    def p_PrimitiveOrStringTypeOctet(self, p):
        """
        PrimitiveOrStringType : OCTET
        """
        p[0] = IDLBuiltinType.Types.octet
    def p_PrimitiveOrStringTypeFloat(self, p):
        """
        PrimitiveOrStringType : FLOAT
        """
        p[0] = IDLBuiltinType.Types.float
    def p_PrimitiveOrStringTypeUnrestictedFloat(self, p):
        """
        PrimitiveOrStringType : UNRESTRICTED FLOAT
        """
        p[0] = IDLBuiltinType.Types.unrestricted_float
    def p_PrimitiveOrStringTypeDouble(self, p):
        """
        PrimitiveOrStringType : DOUBLE
        """
        p[0] = IDLBuiltinType.Types.double
    def p_PrimitiveOrStringTypeUnrestictedDouble(self, p):
        """
        PrimitiveOrStringType : UNRESTRICTED DOUBLE
        """
        p[0] = IDLBuiltinType.Types.unrestricted_double
    def p_PrimitiveOrStringTypeDOMString(self, p):
        """
        PrimitiveOrStringType : DOMSTRING
        """
        p[0] = IDLBuiltinType.Types.domstring
    def p_PrimitiveOrStringTypeBytestring(self, p):
        """
        PrimitiveOrStringType : BYTESTRING
        """
        p[0] = IDLBuiltinType.Types.bytestring
    def p_PrimitiveOrStringTypeUSVString(self, p):
        """
        PrimitiveOrStringType : USVSTRING
        """
        p[0] = IDLBuiltinType.Types.usvstring
    def p_UnsignedIntegerTypeUnsigned(self, p):
        """
        UnsignedIntegerType : UNSIGNED IntegerType
        """
        # Adding one to a given signed integer type gets you the unsigned type:
        # the Types enum is laid out so signed/unsigned pairs are adjacent.
        p[0] = p[2] + 1
    def p_UnsignedIntegerType(self, p):
        """
        UnsignedIntegerType : IntegerType
        """
        p[0] = p[1]
    def p_IntegerTypeShort(self, p):
        """
        IntegerType : SHORT
        """
        p[0] = IDLBuiltinType.Types.short
    def p_IntegerTypeLong(self, p):
        """
        IntegerType : LONG OptionalLong
        """
        # "long long" vs plain "long".
        if p[2]:
            p[0] = IDLBuiltinType.Types.long_long
        else:
            p[0] = IDLBuiltinType.Types.long
    def p_OptionalLong(self, p):
        """
        OptionalLong : LONG
        """
        p[0] = True
    def p_OptionalLongEmpty(self, p):
        """
        OptionalLong :
        """
        p[0] = False
    # TypeSuffix collects trailing "[]" (array) and "?" (nullable) modifiers
    # as (modifier, location) pairs, in source order, for handleModifiers.
    # TypeSuffixStartingWithArray disallows a leading "?" (used after "any").
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_TypeSuffixBrackets(self, p):
        """
        TypeSuffix : LBRACKET RBRACKET TypeSuffix
        """
        p[0] = [(IDLMethod.TypeSuffixModifier.Brackets, self.getLocation(p, 1))]
        p[0].extend(p[3])
    def p_TypeSuffixQMark(self, p):
        """
        TypeSuffix : QUESTIONMARK TypeSuffixStartingWithArray
        """
        p[0] = [(IDLMethod.TypeSuffixModifier.QMark, self.getLocation(p, 1))]
        p[0].extend(p[2])
    def p_TypeSuffixEmpty(self, p):
        """
        TypeSuffix :
        """
        p[0] = []
    def p_TypeSuffixStartingWithArray(self, p):
        """
        TypeSuffixStartingWithArray : LBRACKET RBRACKET TypeSuffix
        """
        p[0] = [(IDLMethod.TypeSuffixModifier.Brackets, self.getLocation(p, 1))]
        p[0].extend(p[3])
    def p_TypeSuffixStartingWithArrayEmpty(self, p):
        """
        TypeSuffixStartingWithArray :
        """
        p[0] = []
    def p_Null(self, p):
        """
        Null : QUESTIONMARK
             |
        """
        # True iff the optional "?" token was present.
        if len(p) > 1:
            p[0] = True
        else:
            p[0] = False
    # ReturnType adds "void" on top of the normal Type grammar. The scoped
    # name productions only support plain identifiers: the "::"-qualified
    # forms are grammar placeholders that are asserted unreachable.
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_ReturnTypeType(self, p):
        """
        ReturnType : Type
        """
        p[0] = p[1]
    def p_ReturnTypeVoid(self, p):
        """
        ReturnType : VOID
        """
        p[0] = BuiltinTypes[IDLBuiltinType.Types.void]
    def p_ScopedName(self, p):
        """
        ScopedName : AbsoluteScopedName
                   | RelativeScopedName
        """
        p[0] = p[1]
    def p_AbsoluteScopedName(self, p):
        """
        AbsoluteScopedName : SCOPE IDENTIFIER ScopedNameParts
        """
        # "::Name" qualification is not implemented.
        assert False
        pass
    def p_RelativeScopedName(self, p):
        """
        RelativeScopedName : IDENTIFIER ScopedNameParts
        """
        assert not p[2] # Not implemented!
        p[0] = IDLUnresolvedIdentifier(self.getLocation(p, 1), p[1])
    def p_ScopedNameParts(self, p):
        """
        ScopedNameParts : SCOPE IDENTIFIER ScopedNameParts
        """
        assert False
        pass
    def p_ScopedNamePartsEmpty(self, p):
        """
        ScopedNameParts :
        """
        p[0] = None
    # The five concrete extended-attribute shapes, each reduced to a tuple
    # whose arity distinguishes the form: (name,), (name, args),
    # (name, ident-or-string), (name, ident, args), (name, [idents]).
    # (Docstrings are PLY grammar rules; do not edit.)
    def p_ExtendedAttributeNoArgs(self, p):
        """
        ExtendedAttributeNoArgs : IDENTIFIER
        """
        p[0] = (p[1],)
    def p_ExtendedAttributeArgList(self, p):
        """
        ExtendedAttributeArgList : IDENTIFIER LPAREN ArgumentList RPAREN
        """
        p[0] = (p[1], p[3])
    def p_ExtendedAttributeIdent(self, p):
        """
        ExtendedAttributeIdent : IDENTIFIER EQUALS STRING
                               | IDENTIFIER EQUALS IDENTIFIER
        """
        p[0] = (p[1], p[3])
    def p_ExtendedAttributeNamedArgList(self, p):
        """
        ExtendedAttributeNamedArgList : IDENTIFIER EQUALS IDENTIFIER LPAREN ArgumentList RPAREN
        """
        p[0] = (p[1], p[3], p[5])
    def p_ExtendedAttributeIdentList(self, p):
        """
        ExtendedAttributeIdentList : IDENTIFIER EQUALS LPAREN IdentifierList RPAREN
        """
        p[0] = (p[1], p[4])
    def p_IdentifierList(self, p):
        """
        IdentifierList : IDENTIFIER Identifiers
        """
        idents = list(p[2])
        idents.insert(0, p[1])
        p[0] = idents
    def p_IdentifiersList(self, p):
        """
        Identifiers : COMMA IDENTIFIER Identifiers
        """
        idents = list(p[3])
        idents.insert(0, p[2])
        p[0] = idents
    def p_IdentifiersEmpty(self, p):
        """
        Identifiers :
        """
        p[0] = []
    # PLY's designated error handler: p is None at EOF (truncated input),
    # otherwise the offending token with position info for the error report.
    def p_error(self, p):
        if not p:
            raise WebIDLError("Syntax Error at end of file. Possibly due to missing semicolon(;), braces(}) or both",
                              [self._filename])
        else:
            raise WebIDLError("invalid syntax", [Location(self.lexer, p.lineno, p.lexpos, self._filename)])
    def __init__(self, outputdir='', lexer=None):
        """ Build the yacc parser tables from this class's p_* docstrings,
        set up the global IDL scope (with fake test globals), and pre-parse
        the builtin IDL in Parser._builtins. """
        Tokenizer.__init__(self, outputdir, lexer)
        # Suppress yacc's noisy warnings; real grammar errors are re-raised
        # by reportGrammarErrors() below.
        logger = SqueakyCleanLogger()
        self.parser = yacc.yacc(module=self,
                                outputdir=outputdir,
                                tabmodule='webidlyacc',
                                errorlog=logger,
                                debug=False
                                # Pickling the grammar is a speedup in
                                # some cases (older Python?) but a
                                # significant slowdown in others.
                                # We're not pickling for now, until it
                                # becomes a speedup again.
                                # , picklefile='WebIDLGrammar.pkl'
                               )
        logger.reportGrammarErrors()
        self._globalScope = IDLScope(BuiltinLocation("<Global Scope>"), None, None)
        # To make our test harness work, pretend like we have a primary global already.
        # Note that we _don't_ set _globalScope.primaryGlobalAttr,
        # so we'll still be able to detect multiple PrimaryGlobal extended attributes.
        self._globalScope.primaryGlobalName = "FakeTestPrimaryGlobal"
        self._globalScope.globalNames.add("FakeTestPrimaryGlobal")
        self._globalScope.globalNameMapping["FakeTestPrimaryGlobal"].add("FakeTestPrimaryGlobal")
        # And we add the special-cased "System" global name, which
        # doesn't have any corresponding interfaces.
        self._globalScope.globalNames.add("System")
        self._globalScope.globalNameMapping["System"].add("BackstagePass")
        self._installBuiltins(self._globalScope)
        self._productions = []
        # Parse the builtin IDL with a sentinel filename for error locations.
        self._filename = "<builtin>"
        self.lexer.input(Parser._builtins)
        self._filename = None
        self.parser.parse(lexer=self.lexer, tracking=True)
    def _installBuiltins(self, scope):
        """ Register typedefs for the typed-array builtin types in `scope`. """
        assert isinstance(scope, IDLScope)
        # xrange omits the last value.
        for x in xrange(IDLBuiltinType.Types.ArrayBuffer, IDLBuiltinType.Types.Float64Array + 1):
            builtin = BuiltinTypes[x]
            name = builtin.name
            # IDLTypedef registers itself with `scope` on construction; the
            # local binding is intentionally unused.
            typedef = IDLTypedef(BuiltinLocation("<builtin type>"), scope, builtin, name)
@ staticmethod
def handleModifiers(type, modifiers):
for (modifier, modifierLocation) in modifiers:
assert (modifier == IDLMethod.TypeSuffixModifier.QMark or
modifier == IDLMethod.TypeSuffixModifier.Brackets)
if modifier == IDLMethod.TypeSuffixModifier.QMark:
type = IDLNullableType(modifierLocation, type)
elif modifier == IDLMethod.TypeSuffixModifier.Brackets:
type = IDLArrayType(modifierLocation, type)
return type
    def parse(self, t, filename=None):
        """ Parse WebIDL text `t` and accumulate its productions; `filename`
        is only used to label error locations. """
        self.lexer.input(t)
        # for tok in iter(self.lexer.token, None):
        #    print tok
        self._filename = filename
        self._productions.extend(self.parser.parse(lexer=self.lexer, tracking=True))
        self._filename = None
    def finish(self):
        """ Resolve and validate everything parsed so far.

        Order matters: iterator interfaces are synthesized for pair-iterable
        interfaces first, then implements statements, then all other
        productions, then validation.  Returns the de-duplicated production
        list in original order.
        """
        # If we have interfaces that are iterable, create their
        # iterator interfaces and add them to the productions array.
        interfaceStatements = [p for p in self._productions if
                               isinstance(p, IDLInterface)]
        # NOTE(review): iterableIteratorIface is never assigned below.
        iterableIteratorIface = None
        for iface in interfaceStatements:
            iterable = None
            # We haven't run finish() on the interface yet, so we don't know
            # whether our interface is maplike/setlike/iterable or not. This
            # means we have to loop through the members to see if we have an
            # iterable member.
            for m in iface.members:
                if isinstance(m, IDLIterable):
                    iterable = m
                    break
            if iterable and iterable.isPairIterator():
                def simpleExtendedAttr(str):
                    return IDLExtendedAttribute(iface.location, (str, ))
                nextMethod = IDLMethod(
                    iface.location,
                    IDLUnresolvedIdentifier(iface.location, "next"),
                    BuiltinTypes[IDLBuiltinType.Types.object], [])
                nextMethod.addExtendedAttributes([simpleExtendedAttr("Throws")])
                itr_ident = IDLUnresolvedIdentifier(iface.location,
                                                    iface.identifier.name + "Iterator")
                itr_iface = IDLInterface(iface.location, self.globalScope(),
                                         itr_ident, None, [nextMethod],
                                         isKnownNonPartial=True)
                itr_iface.addExtendedAttributes([simpleExtendedAttr("NoInterfaceObject")])
                # Make sure the exposure set for the iterator interface is the
                # same as the exposure set for the iterable interface, because
                # we're going to generate methods on the iterable that return
                # instances of the iterator.
                itr_iface._exposureGlobalNames = set(iface._exposureGlobalNames)
                # Always append generated iterable interfaces after the
                # interface they're a member of, otherwise nativeType generation
                # won't work correctly.
                itr_iface.iterableInterface = iface
                self._productions.append(itr_iface)
                iterable.iteratorType = IDLWrapperType(iface.location, itr_iface)
        # Then, finish all the IDLImplementsStatements.  In particular, we
        # have to make sure we do those before we do the IDLInterfaces.
        # XXX khuey hates this bit and wants to nuke it from orbit.
        implementsStatements = [p for p in self._productions if
                                isinstance(p, IDLImplementsStatement)]
        otherStatements = [p for p in self._productions if
                           not isinstance(p, IDLImplementsStatement)]
        for production in implementsStatements:
            production.finish(self.globalScope())
        for production in otherStatements:
            production.finish(self.globalScope())
        # Do any post-finish validation we need to do
        for production in self._productions:
            production.validate()
        # De-duplicate self._productions, without modifying its order.
        seen = set()
        result = []
        for p in self._productions:
            if p not in seen:
                seen.add(p)
                result.append(p)
        return result
    # Return a fresh Parser sharing this lexer, discarding accumulated
    # productions (used by test harnesses between cases).
    def reset(self):
        return Parser(lexer=self.lexer)
    # Builtin IDL defined by WebIDL
    _builtins = """
          typedef unsigned long long DOMTimeStamp;
          typedef (ArrayBufferView or ArrayBuffer) BufferSource;
    """
# Command-line driver: parse each file argument and run finish(), printing
# each path as it is processed.  (Python 2 syntax: print statements and
# comma-form except clause.)
def main():
    # Parse arguments.
    from optparse import OptionParser
    usageString = "usage: %prog [options] files"
    o = OptionParser(usage=usageString)
    o.add_option("--cachedir", dest='cachedir', default=None,
                 help="Directory in which to cache lex/parse tables.")
    o.add_option("--verbose-errors", action='store_true', default=False,
                 help="When an error happens, display the Python traceback.")
    (options, args) = o.parse_args()
    if len(args) < 1:
        o.error(usageString)
    fileList = args
    baseDir = os.getcwd()
    # Parse the WebIDL.
    # NOTE(review): cachedir is passed as Parser's outputdir positional.
    parser = Parser(options.cachedir)
    try:
        for filename in fileList:
            fullPath = os.path.normpath(os.path.join(baseDir, filename))
            f = open(fullPath, 'rb')
            lines = f.readlines()
            f.close()
            print fullPath
            parser.parse(''.join(lines), fullPath)
        parser.finish()
    except WebIDLError, e:
        if options.verbose_errors:
            traceback.print_exc()
        else:
            print e
if __name__ == '__main__':
    main()
| mpl-2.0 |
mrdanbrooks/minibus | minibus.py | 2 | 37304 | # Copyright 2015 Dan Brooks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" minibus """
# This is a port of the minibus2.py file from homesec, with some additional comments
# for adding service functionality
import logging
import logging.handlers
import re
# Prefer ujson when available; fall back to the stdlib json module.
# Bug fix: the original had an unconditional "import ujson as json" before
# this try/except, which raised ImportError on systems without ujson and
# made the documented fallback unreachable.
try:
    # Much faster then built-in json
    # http://artem.krylysov.com/blog/2015/09/29/benchmark-python-json-libraries/
    import ujson as json
except ImportError:
    import json
import jsonschema
# TODO: We spend ALOT of time validating messages. This can be improved.
# We should allow publishers, subscribers, and services to optionally NOT TEST
# every message sent/received. However, before implementing this we should
# establish some kind of a "distributed master" that tracks who is doing what
# using our hidden topic names, and check that topic schemas at least line up.
import os
import random
import uuid
import netifaces as ni
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
try:
import gnupg # Install with python-gnupg or py27-gnupg
HAS_GNUPG = True
except ImportError:
HAS_GNUPG = False
try:
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor, defer
HAS_TWISTED = True
except ImportError:
HAS_TWISTED = False
class MiniBusClientAPI(object):
    """ Defines the public API for interacting with the minibus.

    Abstract: every method raises NotImplementedError; concrete transports
    implement them.
    """
    def __init__(self, name, cryptokey=None):
        """ Clients have a defined name.

        NOTE(review): MiniBusClientCore invokes this as
        MiniBusClientAPI.__init__(self, name, iface), passing an interface
        name where this signature declares cryptokey — harmless while the
        body is a no-op, but the signatures should be reconciled.
        """
        pass
    def publisher(self, topic_name, data_format):
        # Expected to return a callable that publishes data on topic_name.
        raise NotImplementedError()
    def subscribe(self, name_pattern, data_format, callback, headers=False):
        # Should return a reference to be used to remove the subscriber
        raise NotImplementedError()
    def unsubscribe(self, name_pattern, callback):
        raise NotImplementedError()
    def service_client(self, name, reqst_schema, reply_schema, reply_cb, err_cb):
        # This should set up resources for sending and receiving data with remote service
        # requester(params): Sends data to remote service. Returns srvid value
        # receiver(srvid, callback()
        raise NotImplementedError()
    def service_func_client(self, name, reqst_schema, reply_schema):
        """ Returns a function that behaves like a local function.
        retval = proxyfunc(params)
        """
        raise NotImplementedError()
    def service_server(self, name, reqst_schema, reply_schema, func):
        """ Provides a named network service. Service work is received by the
        func parameter, including an srvid value and received parameters.
        The srvid value is used to identify the client's "work order" information
        and must be passed to one of the two reply functions.
        Service returns value to client when a value is passed to either
        service_server_return() or service_server_error().
        """
        raise NotImplementedError()
    def service_func_server(self, name, reqst_schema, reply_schema, func):
        """ Provides a named network service linked to a local function.
        my_func(params...)
        Client receives the value returned by the function.
        This is a convenience function that wraps the functionality of service_server()
        around a single function which returns a value to be sent back to the client.
        """
        raise NotImplementedError()
    def service_server_return(self, srvid, value):
        """ Used by a service server to send a return value to a service client """
        raise NotImplementedError()
    def service_server_error(self, srvid, value):
        """ Used by a service server to send an error value to a service client """
        raise NotImplementedError()
# JSON Schema for the per-message header. Only "topic" is mandatory;
# "gpg" marks the payload as symmetrically encrypted, "version" pins the
# wire-format revision.
data_header = {
    "title": "Data Header Object",
    "type": "object",
    "properties": {
        "topic": {"type": "string"},
        "author": {"type": "string"},
        "gpg": {"type": "string"},
        "idstr": {"type": "string"},
        "version": {"enum": ["0.3.0"]}
        },
    "required": ["topic"]
    }
# Top-level envelope schema: a header plus the payload, which is itself a
# serialized-JSON string (validated separately against the topic's schema).
busschema = {
    "title": "MiniBus Schema 0.3.0",
    "type": "object",
    "properties": {
        "header": data_header,
        "data": {"type": "string"},  # This is serialized json data
        },
    "required": ["header", "data"],
    "additionalProperties": False
    }
class MiniBusClientCoreServices(object):
    """ Embedded introspection services that every MiniBus client exposes.

    Registers service endpoints — both bus-global names and names scoped to
    this client — reporting the client's name, publishers, subscribers,
    service servers/clients, hostname and pid.  This mixin must be
    initialized last: it relies on _clientname, _publishers, _subscriptions
    and service_func_server() already being set up by the hosting client.
    """
    def __init__(self):
        # Bus-global endpoint answered by every client on the bus.
        self.service_func_server("/__minibus__/__listclients__",
                                 {"type": "null"},
                                 {"type": "string"},
                                 self.__get_name)
        # Each list-valued query gets a bus-global topic plus one scoped to
        # this client's name.  Fresh schema dicts are passed per registration
        # (matching the original per-call literals) in case the registry
        # mutates them.
        for leaf, handler in (("__publishers__", self.__get_publishers),
                              ("__subscribers__", self.__get_subscribers),
                              ("__service_servers__", self.__get_service_servers),
                              ("__service_clients__", self.__get_service_clients)):
            self.service_func_server("/__minibus__/%s" % leaf,
                                     {"type": "null"},
                                     {"type": "array"},
                                     handler)
            self.service_func_server("/__minibus__/%s/%s" % (self._clientname, leaf),
                                     {"type": "null"},
                                     {"type": "array"},
                                     handler)
        self.service_func_server("/__minibus__/%s/__hostname__" % self._clientname,
                                 {"type": "null"},
                                 {"type": "string"},
                                 self.__get_hostname)
        self.service_func_server("/__minibus__/%s/__pid__" % self._clientname,
                                 {"type": "null"},
                                 {"type": "integer"},
                                 self.__get_pid)
    def __get_name(self, params):
        """ Service handler: this client's name. """
        return self._clientname
    def __get_hostname(self, params):
        """ Service handler: hostname of the machine running this client. """
        import socket
        return socket.gethostname()
    def __get_pid(self, params):
        """ Service handler: pid of this client's process. """
        return os.getpid()
    def __get_publishers(self, params):
        """ Service call function that returns a list of regular topic publishers by this client,
        not including topics related to services.
        """
        # Bug fix: the original called copy.deepcopy(), but the 'copy' module
        # is never imported in this file, so this raised NameError at call
        # time.  A shallow list copy is all that is needed here since
        # _publishers holds plain topic-name strings.
        pubs = list(self._publishers)
        # Remove publishers for services
        pubs = [p for p in pubs if p.split("/")[-1] not in ["__request__", "__reply__", "__error__"]]
        return pubs
    def __get_subscribers(self, params):
        """ Service handler: subscription patterns, excluding service topics. """
        # Subscription topics are stored in compiled regular expressions.
        # They are automatically bounded by the characters ^ and $ to delimit the string.
        # Turn keys into strings and remove the bounding characters so we can see the original string
        subs = [p.pattern[1:-1] for p in self._subscriptions.keys()]
        # Remove subscriptions for services
        subs = [s for s in subs if s.split("/")[-1] not in ["__request__", "__reply__", "__error__"]]
        return subs
    def __get_service_servers(self, params):
        """ Service handler: base names of the non-builtin services we serve. """
        subs = [p.pattern[1:-1] for p in self._subscriptions.keys()]
        subs = [s for s in subs if s.split("/")[-1] == "__request__"]
        # Just get the base name without the __request__, __reply__, __error__
        subs = [p[:p.rfind("/")] for p in subs]
        # Filter out Built-in services
        subs = [s for s in subs if not re.match("^/__minibus__/", s)]
        return subs
    def __get_service_clients(self, params):
        """ Service handler: base names of the non-builtin services we call. """
        # Shallow copy for the same reason as in __get_publishers.
        pubs = list(self._publishers)
        pubs = [p[:p.rfind("/")] for p in pubs if p.split("/")[-1] == "__request__"]
        # Filter out Built-in services
        pubs = [p for p in pubs if not re.match("^/__minibus__/", p)]
        return pubs
class MiniBusClientCore(MiniBusClientAPI, MiniBusClientCoreServices):
#pylint: disable=no-self-use,no-member
    def __init__(self, name=None, iface=None, cryptokey=None):
        """ Core client setup: logging, identity, optional symmetric crypto,
        and the subscription/publication bookkeeping tables.  Core services
        are registered last, once everything else exists. """
        # Set up Logging Mechanism
        self._logger = MBLagerLogger("MiniBus")
        self._logger.console(INFO)
        self._logger.debug("Using module '%s' for messages" % json.__name__)
        # NOTE(review): MiniBusClientAPI.__init__ declares (name, cryptokey);
        # `iface` is being passed in the cryptokey slot.  Harmless while the
        # base __init__ is a no-op, but worth confirming/fixing upstream.
        MiniBusClientAPI.__init__(self, name, iface)
        self._iface = iface
        # Fall back to a random UUID when no client name was given.
        self._clientname = name if name else str(uuid.uuid4())
        self._cryptokey = cryptokey
        if cryptokey:
            if not HAS_GNUPG:
                raise Exception("cryptokey was provided, but gnupg module not installed.")
            self._gpg = gnupg.GPG()
        self._subscriptions = dict()  # topicname(regex): callbacks(list(func))
        # topic patterns (subscriptions and publications) must have a single schema
        self._topic_schemas = dict()  # topicname(regex): jsonschema(dict)
        self._publishers = list()  # List of published topics: only for keeping track to be reported by __get_publishers
        # Services
        self._service_server_requests = dict()  # requestid(str): "servicetopicsharedname/"
        # Make sure our schema is good
        jsonschema.Draft4Validator.check_schema(busschema)
        # Start common services - this should only be done after everything else is loaded.
        MiniBusClientCoreServices.__init__(self)
def _get_iface_ip(self):
""" Returns the ip address for a named interface """
if self._iface not in ni.interfaces():
raise Exception("Interface %s not found" % self._iface)
return ni.ifaddresses(self._iface)[ni.AF_INET][0]['addr']
def _get_name_pattern(self, name_pattern):
""" automatically adds a ^ to begining and $ to end of name_patterns if needed """
if not name_pattern[0] == '^':
name_pattern = '^' + name_pattern
if not name_pattern[-1] == '$':
name_pattern = name_pattern + '$'
# Make sure topic_name is a valid regex
pattern = re.compile(name_pattern)
return pattern
    def _encrypt_data(self, plaintext):
        """ Encrypts data using gpg symmetric armored text. This is pretty bad and should be replaced

        Returns only the base64 body of the ASCII-armored message, with the
        BEGIN/END armor lines (and any Version: line) stripped and the
        remaining lines joined into one string; _decrypt_data re-adds the
        armor before decrypting.
        """
        if not HAS_GNUPG:
            raise Exception("Attempting to encrypted packet, but I don't have gnupg")
        crypt = self._gpg.encrypt(plaintext, recipients=None, symmetric=True,
                                  passphrase=self._cryptokey, armor=True)
        ciphertext = [x for x in crypt.data.split('\n') if len(x) > 0]
        # Remove the first and last lines
        # First line is -----BEGIN PGP MESSAGE-----
        # Last line is -----END PGP MESSAGE-----
        ciphertext = ciphertext[1:-1]
        # Second line is possibly Version: GnuPG vX.X.X - remove it
        if len(ciphertext) > 0 and len(ciphertext[0].strip()) > 0:
            if ciphertext[0].strip().lower()[:7] == "version":
                ciphertext = ciphertext[1:]
        # Combines all the lines into one long string of text
        return "".join(ciphertext)
    def _decrypt_data(self, ciphertext):
        """ Takes packet with armored symmetric gpg data and replaces it with plain text

        Inverse of _encrypt_data: re-wraps the bare base64 body in PGP armor
        and decrypts it with the shared symmetric key.  Returns the gnupg
        Crypt result object (callers read its .data attribute).
        """
        # TODO: Instead of encrypting just the data in the message, we should probably be wrapping everything?
        if not HAS_GNUPG:
            raise Exception("Received encrypted packet that I can't decrypt")
        if not self._cryptokey:
            raise Exception("Received encrypted packet, but I don't have a key")
        # We are just sending the ciphertext and not the full message, so we need
        # to reconstructe it here before we can decrypt it
        ciphertext = "-----BEGIN PGP MESSAGE-----\n\n%s\n-----END PGP MESSAGE-----" % ciphertext
        plaintext = self._gpg.decrypt(ciphertext, passphrase=self._cryptokey)
        return plaintext
    def recv_packet(self, datagram):
        """ Transport entry point for one raw datagram: validate the bus
        envelope, then deliver the (decrypted, deserialized, schema-checked)
        payload to every subscription whose pattern matches the topic. """
        self._logger.debug("Received datagram=%s" % datagram)
        # Check for empty data packets
        if len(datagram.strip()) == 0:
            self._logger.debug("Datagram was empty")
            return
        # Deserialize packet
        packet = json.loads(datagram)
        # Check packet schema
        jsonschema.validate(packet, busschema)
        topic = packet["header"]["topic"]
        header = packet["header"]
        data = packet["data"]
        for pattern, callbacks in self._subscriptions.items():
            if pattern.match(topic):
                # Make sure encapsulated data matches user specified schema
                user_schema = self._topic_schemas[pattern]
                self._logger.debug("Found matching pattern %s that will use schema %s "
                                   % (pattern.pattern, user_schema))
                # Decrypt data
                if "gpg" in packet["header"]:
                    data = self._decrypt_data(data)
                    data = data.data
                # Deserialize data
                data = json.loads(data)
                # Validate data
                jsonschema.validate(data, user_schema)
                # push data to all the callbacks for this pattern (in a random order)
                index_order = range(len(callbacks))
                random.shuffle(index_order)
                for i in index_order:
                    # callbacks[i](header, data)
                    #NOTE: This was changed out for the above because socket
                    # implementation of service_func_client would hang waiting
                    # for a response in mbtt. This fixes it, although I have not
                    # traced the exact reason why the above was not working
                    self._run_callback(callbacks[i], header, data)
    def send_packet(self, datagram):
        """ Transport hook: transmit one serialized packet on the bus.
        Implemented by the transport-specific subclass. """
        raise NotImplementedError()
    def _publish(self, name, idstr, data):
        """ Serialize data and publish on topic of given name.
        This function is wrapped in a lambda expression and returned by publisher()

        name (str): topic name to publish on
        idstr (str): unique id placed in the packet header (used by services
            to correlate requests with replies/errors)
        data: payload; must validate against the schema registered for every
            pattern that matches this topic name
        """
        # Make sure this data conforms to user schema
        self._logger.debug("Attempting to publish %s" % data)
        for pattern, schema in self._topic_schemas.items():
            if pattern.match(name):
                self._logger.debug("found matching pattern %s" % pattern.pattern)
                jsonschema.validate(data, schema)
        # Serialize
        data = json.dumps(data)
        # Encrypt the payload when a shared key was configured
        if self._cryptokey:
            data = self._encrypt_data(data)
        # Packetize
        packet = {"header": {"topic": name, "author": self._clientname, "idstr": idstr}, "data": data}
        if self._cryptokey:
            # Mark the packet so receivers know the payload must be decrypted
            packet["header"]["gpg"] = 'yes'
        jsonschema.validate(packet, busschema)
        # Serialize
        packet = json.dumps(packet)
        # NOTE: This assumes all data is being sent locally over the control bus.
        # it needs updated to transmit over specific tcp ports eventually
        self.send_packet(packet)
        self._logger.debug("Packet sent!")
    def subscribe(self, name_pattern, data_format, callback, headers=False):
        """ Instructs client to listen to topic matching 'topic_name'.
        name_pattern (str): regex to match topic name against
        data_format (dict): jsonschema to validate incomming data types
        callback (func): function that will be called to receive the data
        headers: When True, callback function should have signature func(headers, data),
            otherwise func(data)

        Raises an Exception if data_format conflicts with a schema already
        registered for this pattern, or if callback is already subscribed.
        """
        pattern = self._get_name_pattern(name_pattern)
        # Make sure topic_type is a valid schema and matches existing pattern definitions
        # Within a client these must be consistant, however different clients can define
        # less strict schema to match against
        jsonschema.Draft4Validator.check_schema(data_format)
        if pattern not in self._topic_schemas:
            self._topic_schemas[pattern] = data_format
        elif not data_format == self._topic_schemas[pattern]:
            raise Exception("Conflicting schema already exists for %s" % name_pattern)
        # If are not already listening to this pattern, create a callback list
        if pattern not in self._subscriptions:
            self._subscriptions[pattern] = list()
        # Make sure we don't have multiple of the same callback
        if callback in self._subscriptions[pattern]:
            raise Exception("Callback %s already registered for subscription %s"
                            % (str(callback), name_pattern))
        # If we don't want to deal with header data, wrap the callback function
        # before adding it to the subscriptions list
        if headers:
            self._subscriptions[pattern].append(callback)
        else:
            # NOTE(review): the wrapper lambda (not 'callback' itself) is what
            # gets stored, so unsubscribe(name_pattern, callback) cannot locate
            # headers=False subscriptions -- confirm whether that is intended.
            simple = lambda header, data, func=callback: callback(data)
            self._subscriptions[pattern].append(simple)
def unsubscribe(self, name_pattern, callback):
pattern = self._get_name_pattern(name_pattern)
try:
self._subscriptions[pattern].remove(callback)
except ValueError:
print "callback not found for this topic"
    def publisher(self, topic_name, data_format):
        """
        Returns a function to publish data on this topic.
        NOTE: This does not check types over the network

        topic_name (str): topic name to publish on
        data_format (dict): jsonschema that published payloads must satisfy

        Raises an Exception if a different schema was already registered for
        a pattern matching this topic.
        """
        pattern = self._get_name_pattern(topic_name)
        jsonschema.Draft4Validator.check_schema(data_format)
        if pattern in self._topic_schemas:
            if not data_format == self._topic_schemas[pattern]:
                raise Exception("Conflicting schema already exists for %s" % topic_name)
        else:
            self._topic_schemas[pattern] = data_format
        # Keep track of publishers in a list so we can tell people what we publish
        # to at a later time
        self._publishers.append(topic_name)
        # Each published packet gets a fresh uuid as its correlation id
        return lambda data: self._publish(topic_name, str(uuid.uuid4()), data)
########################
## Services Functions ##
########################
def _srv_namespacing(self, name):
""" Returns tuple of service topic names (request, reply, error)
Services share a common namespace <name> with three well known topic
names inside it.
"""
if not (isinstance(name, str) or isinstance(name, unicode)):
raise Exception("Service name must be a string, not '%s'" % str(type(name)))
# Add a / to the end if it doesn't already exist
name += "/" if not name[-1] == "/" else ""
return (name + "__request__", name + "__reply__", name + "__error__")
def service_func_server(self, name, reqst_schema, reply_schema, func):
def _srv_fun(reqstid, params, func):
retval = func(params)
self.service_server_return(reqstid, retval)
srv_fun = lambda reqstid, params, f=func: _srv_fun(reqstid, params, f)
self.service_server(name, reqst_schema, reply_schema, srv_fun)
    def service_server(self, name, reqst_schema, reply_schema, func):
        """Register func as the request handler for service <name>.

        name (str): service namespace
        reqst_schema (dict): jsonschema for incoming request payloads
        reply_schema (dict): jsonschema for outgoing reply payloads
        func (func): invoked as func(reqstid, reqst_data); it (or code it
            triggers) must eventually resolve the request with
            service_server_return() or service_server_error()

        Exceptions escaping func are reported to the caller via the service's
        error topic and then re-raised locally.
        """
        # TODO: add information to a list of services this node provides?
        def _srv_cb(headers, reqst_data, func):
            try:
                # Save the work request with the topic you received it on by
                # striping the "__request__" part off the topic string, saving the /
                self._service_server_requests[headers["idstr"]] = headers["topic"][:-11]
                func(headers["idstr"], reqst_data)
            except Exception as e:
                self.service_server_error(headers["idstr"], str(e))
                raise e
            else:
                # TODO: test if work id still exists. If it does, set a timer
                # to check on it again. If it still exists after the timer expires,
                # do the service_server_error() at that point in time and remove it.
                pass
        # Get topic names
        request_topic, reply_topic, error_topic = self._srv_namespacing(name)
        # Create publishers for sending reply to service clients
        # NOTE: Even if we don't ever use these again, it checks the schemas to
        # make sure they are well formed and registers the schema to the topic
        self.publisher(reply_topic, reply_schema)
        self.publisher(error_topic, { })  # TODO: Create a service error schema
        # Wrap the initial service function in a try/except, and subscribe it to
        # the request topic to listen for incoming queries
        srv_cb = lambda headers, reqst_data, f=func: _srv_cb(headers, reqst_data, f)
        self.subscribe(request_topic, reqst_schema, srv_cb, headers=True)
def service_server_return(self, reqstid, value):
# Normally we should not directly use the _publish command because it makes
# assumptions that the topic being published on has been initialized by
# a call to self.publisher() first. But we do that and this feels cleaner
# then keeping track of extra publisher objects
# Remove the service request from the list and use it to generate the topic
# name we will reply on
reply_topic = self._srv_namespacing(self._service_server_requests.pop(reqstid))[1]
self._publish(reply_topic, reqstid, value)
def service_server_error(self, reqstid, value):
# Normally we should not directly use the _publish command because it makes
# assumptions that the topic being published on has been initialized by
# a call to self.publisher() first. But we do that and this feels cleaner
# then keeping track of extra publisher objects
# Remove the service request from the list and use it to generate the topic
# name we will reply on
error_topic = self._srv_namespacing(self._service_server_requests.pop(reqstid))[2]
self._publish(error_topic, reqstid, value)
def service_client(self, name, reqst_schema, reply_schema, reply_cb, err_cb):
def _srv_request(data, topic_name):
""" This is similar to what a publisher returns, except this gives you the uuid back """
#TODO: publishers don't return anything normally, should we just do this all the time
# instead of making it a special case here?
reqstid = str(uuid.uuid4())
request_topic = self._srv_namespacing(name)[0]
self._publish(request_topic, reqstid, data)
return reqstid
# Get topic names
request_topic, reply_topic, error_topic = self._srv_namespacing(name)
# Create publishers and subscribers. We don't keep the publishers but
# this initializes them and registers their schemas
self.publisher(request_topic, reqst_schema)
reply_cb_wrapper = lambda headers, data, cb_func=reply_cb: cb_func(headers["idstr"], data)
err_cb_wrapper = lambda headers, data, cb_func=err_cb: cb_func(headers["idstr"], data)
self.subscribe(reply_topic, reply_schema, reply_cb_wrapper, headers=True)
self.subscribe(error_topic, { }, err_cb_wrapper, headers=True)
# Return a function to "call" the service with
return lambda data, name=name: _srv_request(data, name)
if HAS_TWISTED:
    class MiniBusTwistedClient(MiniBusClientCore):
        """ Twisted client for MiniBus.
        This class tries to hide most if not all of the twisted api specific things. """
        class MBDatagramProtocol(DatagramProtocol):
            """ This is the twisted DatagramProtocol for connecting """
            def __init__(self, client):
                self.mbclient = client

            def startProtocol(self):
                """Join the multicast group, optionally on extra interfaces."""
                # Listen Locally
                self.transport.joinGroup("228.0.0.5")
                # If other interfaces are specified, listen to those as well
                # NOTE: This seems to work in linux, but maybe not in OSX? Unclear
                listening_addresses = list()
                if isinstance(self.mbclient._iface, str):
                    listening_addresses.append(self.mbclient._get_iface_ip())
                elif isinstance(self.mbclient._iface, list):
                    # BUGFIX: this loop previously read 'self.mbclinet._iface'
                    # (typo -> AttributeError) and called _get_iface_ip() on the
                    # protocol object; the helper lives on the client (see the
                    # str branch above).
                    for iface in self.mbclient._iface:
                        listening_addresses.append(self.mbclient._get_iface_ip(iface))
                for ipaddr in listening_addresses:
                    self.transport.joinGroup("228.0.0.5", ipaddr)

            def datagramReceived(self, datagram, address):
                # Hand every received datagram to the core dispatch logic.
                self.mbclient.recv_packet(datagram)

        def __init__(self, name=None, cryptokey=None):
            MiniBusClientCore.__init__(self, name=name, cryptokey=cryptokey)
            self.datagram_protocol = MiniBusTwistedClient.MBDatagramProtocol(self)
            # If there is a fini() defined, run it at shutdown
            if hasattr(self, "fini"):
                reactor.addSystemEventTrigger('during', 'shutdown', self.fini)
            # Start multicast datagram receiver
            self._multicastListener = reactor.listenMulticast(8005,
                                                              self.datagram_protocol,
                                                              listenMultiple=True)
            # If there is a run() defined, call it when the reactor starts up
            if hasattr(self, "run") and callable(getattr(self, "run")):
                reactor.callInThread(self.run)

        def service_func_client(self, name, reqst_schema, reply_schema):
            """ This implementation is somewhat specific to twisted """
            class ServiceFuncClient(object):
                def __init__(self, mbclient, name, reqst_schema, reply_schema):
                    self.mbclient = mbclient
                    self.name = name
                    # Initialize the callback table before wiring up the
                    # service client so no reply can arrive for a missing dict.
                    self._service_callbacks = dict()
                    self.callpub = self.mbclient.service_client(name, reqst_schema, reply_schema, self.reply_cb, self.err_cb)

                def reply_cb(self, idstr, data):
                    # Resolve the deferred that __call__ is waiting on.
                    if idstr in self._service_callbacks:
                        self._service_callbacks[idstr].callback(data)
                    #TODO: If thing not listed, set timer and try again

                def err_cb(self, idstr, data):
                    #TODO: Implement this
                    self.reply_cb(idstr, data) # Punt by sending error data to reply callback
                    raise Exception("Implement Twisted Service Client Errors")

                @defer.inlineCallbacks
                def __call__(self, data):
                    idstr = self.callpub(data)
                    d = defer.Deferred()
                    self._service_callbacks[idstr] = d
                    # Wait for reply
                    ret = yield d
                    self._service_callbacks.pop(idstr)
                    defer.returnValue(ret)
            return ServiceFuncClient(self, name, reqst_schema, reply_schema)

        def send_packet(self, data):
            self._logger.debug("Asserting that we are running")
            self._assert_running()
            self._logger.debug("Writing to transport")
            self.datagram_protocol.transport.write(data, ("228.0.0.5", 8005))
            self._logger.debug("Finished")

        def _assert_running(self):
            """ Check to make sure the reactor is running before continuing """
            if not reactor.running:
                raise Exception("This API call must only be called after start()")

        def _run_callback(self, cb, header, data):
            # Schedule on the reactor so callbacks never run re-entrantly.
            reactor.callLater(0, cb, header, data)

        def exec_(self):
            """Run the twisted reactor; blocks until exit_() stops it."""
            reactor.run()

        def _cleanup(self):
            # This was found via trail and error using suggestions from
            # http://blackjml.livejournal.com/23029.html
            listenerDeferred = self._multicastListener.stopListening()
            return listenerDeferred

        def exit_(self):
            self._cleanup()
            self._logger.debug("Disconnecting MiniBus Client")
            reactor.stop()

        @staticmethod
        def inlineServiceCallbacks(fun):
            """ Used to decorate functions that use service callbacks """
            return defer.inlineCallbacks(fun)
import time
import struct
import socket
import sys
import threading
import select
import copy
class MiniBusSocketClient(MiniBusClientCore):
    """
    Threaded socket version of the MiniBusClient.
    Based on code from:
    http://svn.python.org/projects/python/trunk/Demo/sockets/mcast.py
    https://ep2013.europython.eu/media/conference/slides/using-sockets-in-python.html
    """
    def __init__(self, name=None, cryptokey=None):
        MiniBusClientCore.__init__(self, name=name, cryptokey=cryptokey)
        self.addrinfo = socket.getaddrinfo("228.0.0.5", None)[0]
        self.s = socket.socket(self.addrinfo[0], socket.SOCK_DGRAM)
        # Allow multiple copies of this program on one machine
        # (not strictly needed)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # This is used in the twisted framework and fixes the single connection problem in osx
        # twisted/internet/udp.py MulticastPort().createInternetSocket()
        # http://twistedmatrix.com/trac/browser/tags/releases/twisted-14.0.1/twisted/internet/udp.py
        if hasattr(socket, "SO_REUSEPORT"):
            self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        # Set Time-to-live (optional)
        self.ttl_bin = struct.pack('@i', 1)
        if self.addrinfo[0] == socket.AF_INET:  # IPv4
            self.s.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, self.ttl_bin)
        else:
            self.s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, self.ttl_bin)
        # Bind it to the port for receiving
        self.s.bind(('', 8005))
        group_bin = socket.inet_pton(self.addrinfo[0], self.addrinfo[4][0])
        # Join group
        if self.addrinfo[0] == socket.AF_INET:  # IPv4
            mreq = group_bin + struct.pack('=I', socket.INADDR_ANY)
            self.s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
        else:
            mreq = group_bin + struct.pack('@I', 0)
            self.s.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_JOIN_GROUP, mreq)
        # Start receiving thread
        self._running = False
        self._lock = threading.Lock()
        self._recv_thread = threading.Thread(target=self.recv_thread)
        self._recv_thread.start()

    def recv_thread(self):
        """ receive incomming data """
        self._running = True
        self.s.setblocking(1)
        while self._running:
            data = self.s.recv(1500)
            # Strip the trailing newline delimiter(s) added by send_packet()
            while data[-1:] == '\n':
                data = data[:-1]
            data = copy.deepcopy(data)
            if data:
                self.recv_packet(data)

    def service_func_client(self, name, reqst_schema, reply_schema):
        """ This implementation is somewhat specific to the threaded sockets """
        class ServiceFuncClient(object):
            def __init__(self, mbclient, name, reqst_schema, reply_schema):
                self.mbclient = mbclient
                self._service_replies = dict()
                # Initialize synchronization state before wiring up the
                # service client so callbacks never see missing attributes.
                self._lock = threading.Lock()
                self._error = False
                self.callpub = self.mbclient.service_client(name, reqst_schema, reply_schema, self.reply_cb, self.err_cb)

            def reply_cb(self, idstr, data):
                self._service_replies[idstr] = data
                # TODO IF thing not listed, set timer and try again?

            def err_cb(self, idstr, data):
                # TODO: This is a real dirty way of doing this
                self._error = True

            def __call__(self, data):
                idstr = self.callpub(data)
                self._error = False
                # Busy-wait until the receive thread deposits our reply
                while self.mbclient._running and not self._error:
                    if idstr in self._service_replies.keys():
                        break
                    time.sleep(0.001)
                if not self.mbclient._running:
                    raise Exception("Shutting down")
                if self._error:
                    # BUGFIX: was 'NotImplementedException', a name that does
                    # not exist (it raised NameError instead).
                    raise NotImplementedError("Implemented this error")
                with self._lock:
                    return self._service_replies.pop(idstr)
        return ServiceFuncClient(self, name, reqst_schema, reply_schema)

    def send_packet(self, data):
        self._logger.debug("Writing to transport")
        # The trailing newline delimits packets; recv_thread() strips it.
        self.s.sendto(data + '\n', (self.addrinfo[4][0], 8005))
        self._logger.debug("Finished")

    def _run_callback(self, cb, header, data):
        # Run each callback on its own thread so a blocking callback cannot
        # stall the receive loop.
        thread = threading.Thread(target=cb, args=(header, data))
        thread.start()

    def spin(self):
        """ A function to keep the main thread alive until keyboard interrput """
        try:
            while self._running:
                time.sleep(0.1)
        except KeyboardInterrupt:
            return

    def close(self):
        """ Properly shutdown all the connections """
        self._running = False
        with self._lock:
            if self.s:
                try:
                    self.s.shutdown(socket.SHUT_RD)
                except socket.error:
                    # If the above fails, we hang at recv. Send an empty packet to release it.
                    self.s.sendto('\n', (self.addrinfo[4][0], 8005))
                self.s.close()
                self.s = None  # Prevent socket from trying to be closed a second time
        # If called from the main thread, join with receiver before exiting
        if not threading.current_thread() == self._recv_thread:
            self._recv_thread.join()
class MBLagerLogger(logging.Logger):
    """ King of Loggers - Embedded in Minibus to reduce dependencies """
    def __init__(self, name, level=None):
        # Default level is DEBUG when none is given (see __level()).
        logging.Logger.__init__(self, name, self.__level(level))
        self.formatter = logging.Formatter("%(asctime)s %(levelname)-8s %(message)s", "%Y-%m-%d %H:%M:%S")

    def __level(self, lvl):
        """Return lvl, defaulting to logging.DEBUG when lvl is None."""
        return lvl if lvl is not None else logging.DEBUG

    def console(self, level):
        """ adds a console handler """
        ch = logging.StreamHandler()
        ch.setLevel(self.__level(level))
        ch.setFormatter(self.formatter)
        self.addHandler(ch)

    def logfile(self, level, path=None):
        """Add a rotating file handler writing to path (default "log.log").

        Exits the process if the file cannot be opened for appending.
        """
        if path is None:
            path = "log.log"
        path = os.path.normpath(os.path.expanduser(path))
        try:
            # Attempt to set up the logger with specified log target
            open(path, "a").close()
            hdlr = logging.handlers.RotatingFileHandler(path, maxBytes=500000, backupCount=5)
            hdlr.setLevel(self.__level(level))
            hdlr.setFormatter(self.formatter)
        except IOError:
            # BUGFIX: previously referenced the undefined name 'logpath' here,
            # raising NameError instead of reporting the real failure.
            logging.error('Failed to open file %s for logging' % path, exc_info=True)
            sys.exit(1)
        self.addHandler(hdlr)
| apache-2.0 |
sugartom/tensorflow-alien | tensorflow/python/debug/lib/stepper_test.py | 39 | 45190 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests of the tfdbg Stepper."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session
from tensorflow.python.debug.lib.stepper import NodeStepper
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import gradient_descent
class StepperTest(test_util.TensorFlowTestCase):
  def setUp(self):
    """Build the graph used by every test.

    One subgraph computes e = (a*a) * (a*b) and f = b / f_y; a second
    subgraph (x, y, z) contains cross-links: x feeds both y and z, and y
    also feeds z.
    """
    self.a = variables.Variable(2.0, name="a")
    self.b = variables.Variable(3.0, name="b")

    self.c = math_ops.multiply(self.a, self.b, name="c")  # Should be 6.0.
    self.d = math_ops.multiply(self.a, self.a, name="d")  # Should be 4.0.

    self.e = math_ops.multiply(self.d, self.c, name="e")  # Should be 24.0.

    self.f_y = constant_op.constant(0.30, name="f_y")
    self.f = math_ops.div(self.b, self.f_y, name="f")  # Should be 10.0.

    # The there nodes x, y and z form a graph with "cross-links" in. I.e., x
    # and y are both direct inputs to z, but x is also a direct input to y.
    self.x = variables.Variable(2.0, name="x")  # Should be 2.0
    self.y = math_ops.negative(self.x, name="y")  # Should be -2.0.
    self.z = math_ops.multiply(self.x, self.y, name="z")  # Should be -4.0.

    self.sess = session.Session()
    self.sess.run(variables.global_variables_initializer())
  def tearDown(self):
    """Reset the default graph so state does not leak between tests."""
    ops.reset_default_graph()
  def testContToFetchNotInTransitiveClosureShouldError(self):
    """cont() on a target outside the fetch's transitive closure must fail."""
    with NodeStepper(self.sess, "e:0") as stepper:
      sorted_nodes = stepper.sorted_nodes()
      self.assertEqual(7, len(sorted_nodes))
      # The topological order must place producers before consumers.
      self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("a/read"))
      self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("b/read"))
      self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("c"))
      self.assertLess(sorted_nodes.index("b"), sorted_nodes.index("c"))
      self.assertLess(sorted_nodes.index("a"), sorted_nodes.index("d"))
      self.assertLess(sorted_nodes.index("d"), sorted_nodes.index("e"))
      self.assertLess(sorted_nodes.index("c"), sorted_nodes.index("e"))

      self.assertSetEqual(
          {"e:0", "d:0", "c:0", "a/read:0", "b/read:0", "b:0", "a:0"},
          set(stepper.closure_elements()))

      # f:0 is not needed to compute e:0, so cont("f:0") must be rejected.
      with self.assertRaisesRegexp(
          ValueError,
          "Target \"f:0\" is not in the transitive closure for the fetch of "
          "the stepper"):
        stepper.cont("f:0")
  def testContToNodeNameShouldReturnTensorValue(self):
    """cont() with a bare node name returns the node's output tensor value."""
    with NodeStepper(self.sess, "e:0") as stepper:
      self.assertAllClose(6.0, stepper.cont("c"))
  def testUsingNamesNotUsingIntermediateTensors(self):
    """Successive cont() calls by name reuse handles and dumped intermediates."""
    with NodeStepper(self.sess, "e:0") as stepper:
      # The first cont() call should have used no feeds.
      result = stepper.cont("c:0")
      self.assertAllClose(6.0, result)
      self.assertItemsEqual(["a/read:0", "b/read:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
      self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
      self.assertEqual({}, stepper.last_feed_types())

      # The second cont() call should have used the tensor handle from the
      # previous cont() call.
      result = stepper.cont("e:0")
      self.assertAllClose(24.0, result)
      self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
      self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))
      self.assertAllClose(4.0, stepper.get_tensor_value("d:0"))
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_HANDLE,
          "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())
  def testUsingNodesNotUsingIntermediateTensors(self):
    """cont() accepts Tensor objects; handles become available afterwards."""
    with NodeStepper(self.sess, self.e) as stepper:
      # There should be no handles before any cont() calls.
      self.assertEqual([], stepper.handle_names())
      self.assertSetEqual(set(), stepper.handle_node_names())

      # Before the cont() call, the stepper should not have access to the value
      # of c:0.
      with self.assertRaisesRegexp(
          ValueError,
          "This stepper instance does not have access to the value of tensor "
          "\"c:0\""):
        stepper.get_tensor_value("c:0")

      # Using the node/tensor itself, instead of the name str, should work on
      # cont().
      result = stepper.cont(self.c)
      self.assertItemsEqual(["a/read:0", "b/read:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(6.0, result)
      self.assertEqual({}, stepper.last_feed_types())

      self.assertEqual(["c:0"], stepper.handle_names())
      self.assertEqual({"c"}, stepper.handle_node_names())

      # After the cont() call, the stepper should have access to the value of
      # c:0 via a tensor handle.
      self.assertAllClose(6.0, stepper.get_tensor_value("c:0"))

      result = stepper.cont(self.e)
      self.assertAllClose(24.0, result)
      self.assertItemsEqual(["a/read:0", "b/read:0", "d:0"],
                            stepper.intermediate_tensor_names())
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_HANDLE,
          "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())
  def testContToTensorWithIntermediateDumpShouldUseDump(self):
    """cont() reuses previously dumped intermediate tensors as feeds."""
    with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
      stepper.cont("c:0")
      self.assertItemsEqual(["a/read:0", "b/read:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
      self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))

      self.assertAllClose(2.0, stepper.cont("a/read:0"))
      self.assertEqual({
          "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
      }, stepper.last_feed_types())

      self.assertAllClose(10.0, stepper.cont("f:0"))
      self.assertEqual({
          "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
      }, stepper.last_feed_types())
  def testDisablingUseDumpedIntermediatesWorks(self):
    """use_dumped_intermediates=False prevents feeding from dumped tensors."""
    with NodeStepper(self.sess, ["e:0", "f:0"]) as stepper:
      stepper.cont("c:0")
      self.assertItemsEqual(["a/read:0", "b/read:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(2.0, stepper.get_tensor_value("a/read:0"))
      self.assertAllClose(3.0, stepper.get_tensor_value("b/read:0"))

      self.assertAllClose(10.0,
                          stepper.cont("f:0", use_dumped_intermediates=False))
      self.assertEqual({}, stepper.last_feed_types())
  def testIsFeedableShouldGiveCorrectAnswers(self):
    """All non-fetch tensors in the closure of e:0 should be feedable."""
    with NodeStepper(self.sess, self.e) as stepper:
      self.assertTrue(stepper.is_feedable("a/read:0"))
      self.assertTrue(stepper.is_feedable("b/read:0"))
      self.assertTrue(stepper.is_feedable("c:0"))
      self.assertTrue(stepper.is_feedable("d:0"))
  def testOverrideValue(self):
    """Overriding a tensor invalidates its handle and feeds the override."""
    with NodeStepper(self.sess, self.e) as stepper:
      result = stepper.cont(self.c)
      self.assertAllClose(6.0, result)
      self.assertEqual({}, stepper.last_feed_types())

      # There should be no overrides before any cont() calls.
      self.assertEqual([], stepper.override_names())

      # Calling cont() on c again should lead to use of the handle.
      result = stepper.cont(self.c)
      self.assertAllClose(6.0, result)
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_HANDLE
      }, stepper.last_feed_types())

      # Override c:0.
      stepper.override_tensor("c:0", 7.0)

      # After the overriding, calling get_tensor_value() on c:0 should yield the
      # overriding value.
      self.assertEqual(7.0, stepper.get_tensor_value("c:0"))

      # Now c:0 should have only an override value, but no cached handle,
      # because the handle should have been invalidated.
      self.assertEqual([], stepper.handle_names())
      self.assertSetEqual(set(), stepper.handle_node_names())
      self.assertEqual(["c:0"], stepper.override_names())

      # Run a downstream tensor after the value override.
      result = stepper.cont(self.e)
      self.assertAllClose(28.0, result)  # Should reflect the overriding value.

      # Should use override, instead of the handle.
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_OVERRIDE,
          "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())
  def testOverrideValueTwice(self):
    """A second override invalidates handles cached after the first one."""
    with NodeStepper(self.sess, self.e) as stepper:
      # Override once.
      stepper.override_tensor("c:0", 7.0)
      self.assertAllClose(28.0, stepper.cont(self.e))
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_OVERRIDE
      }, stepper.last_feed_types())

      self.assertEqual(["e:0"], stepper.handle_names())
      self.assertSetEqual({"e"}, stepper.handle_node_names())
      self.assertEqual(["c:0"], stepper.override_names())

      # Calling cont(self.e) again. This time the cached tensor handle of e
      # should be used.
      self.assertEqual(28.0, stepper.cont(self.e))
      self.assertEqual({
          "e:0": NodeStepper.FEED_TYPE_HANDLE
      }, stepper.last_feed_types())

      # Override c again. This should have invalidated the cache for e.
      stepper.override_tensor("c:0", 8.0)

      self.assertEqual([], stepper.handle_names())
      self.assertEqual(set(), stepper.handle_node_names())
      self.assertEqual(["c:0"], stepper.override_names())

      self.assertAllClose(32.0, stepper.cont(self.e))
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_OVERRIDE,
          "d:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())
  def testRemoveOverrideValue(self):
    """Removing an override restores original values and invalidates handles."""
    with NodeStepper(self.sess, self.e) as stepper:
      result = stepper.cont(self.c)
      self.assertAllClose(6.0, result)
      self.assertEqual({}, stepper.last_feed_types())

      # The previous cont() step should have generated a cached tensor handle.
      self.assertEqual(["c:0"], stepper.handle_names())
      self.assertSetEqual({"c"}, stepper.handle_node_names())

      # Override c:0.
      stepper.override_tensor("c:0", 7.0)

      # The overriding should have invalidated the tensor handle.
      self.assertEqual([], stepper.handle_names())
      self.assertSetEqual(set(), stepper.handle_node_names())
      self.assertEqual(["c:0"], stepper.override_names())

      result = stepper.cont(self.e)
      self.assertAllClose(28.0, result)  # Should reflect the overriding value.
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_OVERRIDE,
          "a/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())

      # The handle to tensor e:0 should have been cached, even though its
      # transitive closure contains an override.
      self.assertIn("e:0", stepper.handle_names())
      self.assertSetEqual({"e"}, stepper.handle_node_names())

      # Remove the override.
      stepper.remove_override("c:0")

      # c:0 should not be in the overrides anymore.
      self.assertEqual([], stepper.override_names())

      # Removing the override should have invalidated the tensor handle for c.
      self.assertNotIn("e:0", stepper.handle_names())
      self.assertNotIn("e", stepper.handle_node_names())

      # Should reflect the non-overriding value.
      self.assertAllClose(24.0, stepper.cont(self.e))

      # This time, the handle to tensor e:0 should have been cached again, even
      # thought its transitive closure contains an override.
      self.assertIn("e:0", stepper.handle_names())
      self.assertIn("e", stepper.handle_node_names())

      # Calling cont(self.e) again should have used the tensor handle to e:0.
      self.assertAllClose(24.0, stepper.cont(self.e))
      self.assertEqual({
          "e:0": NodeStepper.FEED_TYPE_HANDLE,
      }, stepper.last_feed_types())
  def testOverrideAndContToSameTensor(self):
    """cont() on an overridden tensor yields the override, not the handle."""
    with NodeStepper(self.sess, self.e) as stepper:
      result = stepper.cont(self.c)
      self.assertAllClose(6.0, result)
      self.assertEqual({}, stepper.last_feed_types())
      self.assertEqual(["c:0"], stepper.handle_names())
      self.assertSetEqual({"c"}, stepper.handle_node_names())

      self.assertAllClose(6.0, stepper.cont(self.c))

      # The last cont() call should use the tensor handle directly.
      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_HANDLE
      }, stepper.last_feed_types())

      # Override c:0.
      stepper.override_tensor("c:0", 7.0)

      # As a result of the override, the tensor handle should have been
      # invalidated.
      self.assertEqual([], stepper.handle_names())
      self.assertSetEqual(set(), stepper.handle_node_names())

      result = stepper.cont(self.c)
      self.assertAllClose(7.0, result)

      self.assertEqual({
          "c:0": NodeStepper.FEED_TYPE_OVERRIDE
      }, stepper.last_feed_types())
  def testFinalizeWithPreviousOverrides(self):
    """finalize() runs the fetch from scratch, ignoring any overrides."""
    with NodeStepper(self.sess, self.e) as stepper:
      stepper.override_tensor("a/read:0", 20.0)
      self.assertEqual(["a/read:0"], stepper.override_names())

      # Should reflect the overriding value.
      self.assertAllClose(24000.0, stepper.cont("e:0"))
      self.assertEqual({
          "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE
      }, stepper.last_feed_types())

      # Finalize call should have ignored the overriding value.
      self.assertAllClose(24.0, stepper.finalize())
  def testRemoveNonexistentOverrideValue(self):
    """remove_override() on a tensor without an override must raise."""
    with NodeStepper(self.sess, self.e) as stepper:
      self.assertEqual([], stepper.override_names())

      with self.assertRaisesRegexp(
          ValueError, "No overriding value exists for tensor \"c:0\""):
        stepper.remove_override("c:0")
def testAttemptToOverrideInvalidTensor(self):
stepper = NodeStepper(self.sess, self.e)
with self.assertRaisesRegexp(ValueError, "Cannot override tensor \"f:0\""):
stepper.override_tensor("f:0", 42.0)
  def testInvalidOverrideArgumentType(self):
    """override_tensor() requires a tensor-name string, not a Tensor object."""
    with NodeStepper(self.sess, self.e) as stepper:
      with self.assertRaisesRegexp(TypeError, "Expected type str; got type"):
        stepper.override_tensor(self.a, 42.0)
  def testTransitiveClosureWithCrossLinksShouldHaveCorrectOrder(self):
    """Topological sort must respect the cross-linked x -> y -> z subgraph."""
    with NodeStepper(self.sess, "z:0") as stepper:
      sorted_nodes = stepper.sorted_nodes()
      self.assertEqual(4, len(sorted_nodes))
      self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("x/read"))
      self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("y"))
      self.assertLess(sorted_nodes.index("x"), sorted_nodes.index("z"))
      self.assertLess(sorted_nodes.index("y"), sorted_nodes.index("z"))
def testNodeStepperConstructorShouldAllowListOrTupleOrDictOfFetches(self):
  """The fetches argument may be a (nested) list, tuple or dict.

  Each structure is tried both with Tensor objects and with tensor names;
  the result of finalize() mirrors the structure of the fetches.
  """
  fetch_variants = [
      [self.e, [self.f, self.z]],
      (self.e, (self.f, self.z)),
      {"e": self.e, "fz": {"f": self.f, "z": self.z}},
      ["e:0", ["f:0", "z:0"]],
      ("e:0", ("f:0", "z:0")),
      {"e": "e:0", "fz": {"f": "f:0", "z": "z:0"}},
  ]
  for fetches in fetch_variants:
    with NodeStepper(self.sess, fetches) as stepper:
      order = stepper.sorted_nodes()
      self.assertEqual(13, len(order))

      # Check the topological order of the sorted nodes.
      pos = order.index
      self.assertLess(pos("x"), pos("x/read"))
      self.assertLess(pos("x"), pos("y"))
      self.assertLess(pos("x"), pos("z"))
      self.assertLess(pos("y"), pos("z"))

      self.assertLess(pos("a"), pos("a/read"))
      self.assertLess(pos("b"), pos("b/read"))
      self.assertLess(pos("a"), pos("c"))
      self.assertLess(pos("b"), pos("c"))
      self.assertLess(pos("a"), pos("d"))
      self.assertLess(pos("d"), pos("e"))
      self.assertLess(pos("c"), pos("e"))
      self.assertLess(pos("b"), pos("f"))
      self.assertLess(pos("f_y"), pos("f"))

      closure = stepper.closure_elements()
      for element in ("x/read:0", "e:0", "f:0"):
        self.assertIn(element, closure)
      for node in ("x/read", "e", "f"):
        self.assertEqual([0], stepper.output_slots_in_closure(node))

      result = stepper.finalize()
      if isinstance(fetches, dict):
        self.assertAllClose(24.0, result["e"])
        self.assertAllClose(10.0, result["fz"]["f"])
        self.assertAllClose(-4.0, result["fz"]["z"])
      else:
        self.assertAllClose(24.0, result[0])
        self.assertAllClose(10.0, result[1][0])
        self.assertAllClose(-4.0, result[1][1])
class StepperTestWithPlaceHolders(test_util.TensorFlowTestCase):
  """Tests for NodeStepper on a graph fed through tf.placeholder nodes.

  Graph under test: x = matmul(ph0, ph1); y = x + ph1.
  """

  def setUp(self):
    # ph0 is (2, 2), ph1 is (2, 1); both float32.
    self.ph0 = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph0")
    self.ph1 = array_ops.placeholder(dtypes.float32, shape=(2, 1), name="ph1")

    self.x = math_ops.matmul(self.ph0, self.ph1, name="x")
    self.y = math_ops.add(self.x, self.ph1, name="y")

    self.sess = session.Session()

  def tearDown(self):
    # Reset the default graph so state does not leak between test cases.
    ops.reset_default_graph()

  def testGetTensorValueWorksOnPlaceholder(self):
    """get_tensor_value() accepts both node names and tensor names."""
    with NodeStepper(
        self.sess,
        self.y,
        feed_dict={
            self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
            self.ph1: [[-1.0], [0.5]]
        }) as stepper:
      self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
                          stepper.get_tensor_value("ph0"))
      self.assertAllClose([[1.0, 2.0], [-3.0, 5.0]],
                          stepper.get_tensor_value("ph0:0"))
      # Output slot 1 does not exist on node ph0.
      with self.assertRaisesRegexp(
          KeyError,
          r"The name 'ph0:1' refers to a Tensor which does not exist"):
        stepper.get_tensor_value("ph0:1")

  def testIsPlaceholdersShouldGiveCorrectAnswers(self):
    """is_placeholder() is True only for the placeholder nodes."""
    with NodeStepper(self.sess, self.y) as stepper:
      self.assertTrue(stepper.is_placeholder(self.ph0.name))
      self.assertTrue(stepper.is_placeholder(self.ph1.name))

      self.assertFalse(stepper.is_placeholder(self.x.name))
      self.assertFalse(stepper.is_placeholder(self.y.name))

      # Nodes outside the transitive closure raise ValueError.
      with self.assertRaisesRegexp(ValueError,
                                   "A is not in the transitive closure"):
        self.assertFalse(stepper.is_placeholder("A"))

  def testPlaceholdersShouldGiveCorrectAnswers(self):
    """placeholders() lists exactly the placeholder nodes in the closure."""
    with NodeStepper(self.sess, self.y) as stepper:
      self.assertSetEqual({"ph0", "ph1"}, set(stepper.placeholders()))

  def testContWithPlaceholders(self):
    """cont() feeds placeholders from the client feed_dict."""
    with NodeStepper(
        self.sess,
        self.y,
        feed_dict={
            self.ph0: [[1.0, 2.0], [-3.0, 5.0]],
            self.ph1: [[-1.0], [0.5]]
        }) as stepper:
      self.assertEqual(4, len(stepper.sorted_nodes()))
      self.assertSetEqual({"ph0:0", "ph1:0", "x:0", "y:0"},
                          set(stepper.closure_elements()))

      result = stepper.cont(self.x)
      self.assertAllClose([[0.0], [5.5]], result)
      self.assertEqual({
          "ph0:0": NodeStepper.FEED_TYPE_CLIENT,
          "ph1:0": NodeStepper.FEED_TYPE_CLIENT,
      }, stepper.last_feed_types())

      # The first cont() should have cached a handle to x:0.
      self.assertEqual(["x:0"], stepper.handle_names())
      self.assertSetEqual({"x"}, stepper.handle_node_names())

      # The second cont() reuses the x:0 handle instead of recomputing x.
      result = stepper.cont(self.y)
      self.assertAllClose([[-1.0], [6.0]], result)
      self.assertEqual({
          "x:0": NodeStepper.FEED_TYPE_HANDLE,
          "ph1:0": NodeStepper.FEED_TYPE_CLIENT,
      }, stepper.last_feed_types())

  def testAttemptToContToPlaceholderWithTensorFeedKeysShouldWork(self):
    """Continuing to a placeholder should be allowed, using client feed."""

    ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
    ph1_feed = [[-1.0], [0.5]]
    with NodeStepper(
        self.sess, self.y, feed_dict={
            self.ph0: ph0_feed,
            self.ph1: ph1_feed,
        }) as stepper:
      self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
      self.assertEqual({
          self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      # cont() also accepts the placeholder as a graph node object.
      ph0_node = self.sess.graph.as_graph_element("ph0")
      self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose([[-1.0], [6.0]], stepper.finalize())

  def testAttemptToContToPlaceholderWithTensorNameFeedKeysShouldWork(self):
    """Same as above, but the feed_dict is keyed by tensor names."""
    ph0_feed = [[1.0, 2.0], [-3.0, 5.0]]
    ph1_feed = [[-1.0], [0.5]]

    with NodeStepper(
        self.sess,
        self.y,
        feed_dict={
            self.ph0.name: ph0_feed,
            self.ph1.name: ph1_feed,
        }) as stepper:
      self.assertAllClose(ph0_feed, stepper.cont(self.ph0))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose(ph1_feed, stepper.cont(self.ph1))
      self.assertEqual({
          self.ph1.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      ph0_node = self.sess.graph.as_graph_element("ph0")
      self.assertAllClose(ph0_feed, stepper.cont(ph0_node))
      self.assertEqual({
          self.ph0.name: NodeStepper.FEED_TYPE_CLIENT
      }, stepper.last_feed_types())

      self.assertAllClose([[-1.0], [6.0]], stepper.finalize())
class StepperAssignAddTest(test_util.TensorFlowTestCase):
  """Tests for NodeStepper on a graph containing an assign_add state op.

  Graph under test: v is a Variable (initialized to 10.0); p = v + v;
  q = p * p; v_add = assign_add(v, delta); v_add_plus_one = v_add + 1.0.
  """

  def setUp(self):
    self.v = variables.Variable(10.0, name="v")
    self.p = math_ops.add(self.v, self.v, name="p")
    self.q = math_ops.multiply(self.p, self.p, name="q")

    self.delta = constant_op.constant(2.0, name="delta")
    self.v_add = state_ops.assign_add(self.v, self.delta, name="v_add")
    self.v_add_plus_one = math_ops.add(self.v_add,
                                       1.0,
                                       name="v_add_plus_one")

    self.sess = session.Session()
    self.sess.run(self.v.initializer)

  def tearDown(self):
    # Reset the default graph so state does not leak between test cases.
    ops.reset_default_graph()

  def testLastUpdatedVariablesReturnsNoneBeforeAnyContCalls(self):
    """last_updated() is None until the first cont() call."""
    with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
      self.assertIsNone(stepper.last_updated())

  def testContToUpdateInvalidatesDumpedIntermedates(self):
    """cont() to a variable-updating op invalidates stale intermediates."""
    with NodeStepper(self.sess, [self.q, self.v_add]) as stepper:
      self.assertAllClose(400.0, stepper.cont("q:0"))
      self.assertItemsEqual(["v/read:0", "p:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
      self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))

      self.assertAllClose(
          12.0, stepper.cont(
              self.v_add, invalidate_from_updated_variables=True))
      self.assertAllClose(12.0, self.sess.run(self.v))
      self.assertSetEqual({self.v.name}, stepper.last_updated())
      self.assertItemsEqual(["v:0"], stepper.dirty_variables())
      # Updating the value of v by calling v_add should have invalidated the
      # dumped intermediate tensors for v/read:0 and p:0.
      self.assertItemsEqual(["delta:0"], stepper.intermediate_tensor_names())
      with self.assertRaisesRegexp(
          ValueError,
          r"This stepper instance does not have access to the value of tensor "
          r"\"p:0\""):
        stepper.get_tensor_value("p:0")

      # The next cont to q should not have used any dumped intermediate tensors
      # and its result should reflect the updated value.
      self.assertAllClose(576.0, stepper.cont("q:0"))
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual({}, stepper.last_feed_types())

  def testOverridingUpstreamTensorInvalidatesDumpedIntermediates(self):
    """Overriding a tensor invalidates its downstream dumped intermediates."""
    with NodeStepper(self.sess, self.q) as stepper:
      self.assertAllClose(400.0, stepper.cont("q:0"))
      self.assertItemsEqual(["v/read:0", "p:0"],
                            stepper.intermediate_tensor_names())
      self.assertAllClose(10.0, stepper.get_tensor_value("v/read:0"))
      self.assertAllClose(20.0, stepper.get_tensor_value("p:0"))

      stepper.override_tensor("v/read:0", 11.0)
      self.assertItemsEqual(["v/read:0"], stepper.override_names())
      # Overriding the upstream v/read:0 should have invalidated the dumped
      # intermediate tensor for the downstream p:0.
      self.assertItemsEqual([], stepper.intermediate_tensor_names())

      # The next cont to q should not have used any dumped intermediate tensors
      # and its result should reflect the overriding value.
      self.assertAllClose(484.0, stepper.cont("q:0"))
      self.assertEqual({
          "v/read:0": NodeStepper.FEED_TYPE_OVERRIDE
      }, stepper.last_feed_types())

  def testRemovingOverrideToUpstreamTensorInvalidatesDumpedIntermediates(self):
    """Removing an override also invalidates downstream intermediates."""
    with NodeStepper(self.sess, self.q) as stepper:
      stepper.override_tensor("v/read:0", 9.0)
      self.assertItemsEqual(["v/read:0"], stepper.override_names())

      self.assertAllClose(324.0, stepper.cont(self.q))
      self.assertItemsEqual(["p:0"], stepper.intermediate_tensor_names())

      stepper.remove_override("v/read:0")
      self.assertItemsEqual([], stepper.override_names())
      # Removing the pre-existing override to v/read:0 should have invalidated
      # the dumped intermediate tensor.
      self.assertItemsEqual([], stepper.intermediate_tensor_names())

  def testRepeatedCallsToAssignAddDoesNotUpdateVariableAgain(self):
    """A second cont() to assign_add reuses the handle; no double update."""
    with NodeStepper(self.sess, self.v_add) as stepper:
      stepper.cont(self.v_add)
      self.assertSetEqual({self.v.name}, stepper.last_updated())
      self.assertAllClose(12.0, stepper.cont(self.v))

      stepper.cont(self.v_add)
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual({"v_add:0": NodeStepper.FEED_TYPE_HANDLE},
                       stepper.last_feed_types())
      self.assertAllClose(12.0, stepper.cont(self.v))

  def testRepeatedCallsToAssignAddDownStreamDoesNotUpdateVariableAgain(self):
    """Same guarantee when continuing to an op downstream of assign_add."""
    with NodeStepper(self.sess, self.v_add_plus_one) as stepper:
      stepper.cont(self.v_add_plus_one)
      self.assertSetEqual({self.v.name}, stepper.last_updated())
      self.assertAllClose(12.0, stepper.cont(self.v))

      stepper.cont(self.v_add_plus_one)
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual({"v_add_plus_one:0": NodeStepper.FEED_TYPE_HANDLE},
                       stepper.last_feed_types())
      self.assertAllClose(12.0, stepper.cont(self.v))
class StepperBackwardRunTest(test_util.TensorFlowTestCase):
  """Tests for NodeStepper on a graph with a gradient-descent backward pass."""

  def setUp(self):
    """Test setup.

    Structure of the forward graph:
              f
             | |
        -----   -----
        |           |
        d           e
       | |         | |
    ---   ---------  ---
    |         |        |
    a         b        c

    Construct a backward graph using the GradientDescentOptimizer.
    """

    self.a = variables.Variable(1.0, name="a")
    self.b = variables.Variable(2.0, name="b")
    self.c = variables.Variable(4.0, name="c")
    self.d = math_ops.multiply(self.a, self.b, name="d")
    self.e = math_ops.multiply(self.b, self.c, name="e")
    self.f = math_ops.multiply(self.d, self.e, name="f")

    # Gradient descent optimizer that minimizes g.
    gradient_descent.GradientDescentOptimizer(0.01).minimize(
        self.f, name="optim")

    self.sess = session.Session()
    self.sess.run(variables.global_variables_initializer())

  def tearDown(self):
    # Reset the default graph so state does not leak between test cases.
    ops.reset_default_graph()

  def testContToUpdateA(self):
    """cont() to update_a applies the gradient step only to Variable a."""
    with NodeStepper(self.sess, "optim") as stepper:
      result = stepper.cont("a:0")
      self.assertAllClose(1.0, result)
      self.assertEqual({}, stepper.last_feed_types())

      result = stepper.cont("optim/learning_rate:0")
      self.assertAllClose(0.01, result)
      self.assertEqual({}, stepper.last_feed_types())

      # Before any cont calls on ApplyGradientDescent, there should be no
      # "dirty" variables.
      self.assertEqual(set(), stepper.dirty_variables())

      # First, all the two control inputs to optim.
      result = stepper.cont("optim/update_a/ApplyGradientDescent",
                            invalidate_from_updated_variables=True)

      # Now variable a should have been marked as dirty due to the update
      # by optim/update_a/ApplyGradientDescent.
      self.assertSetEqual({"a:0"}, stepper.last_updated())
      self.assertEqual({"a:0"}, stepper.dirty_variables())
      self.assertIsNone(result)
      self.assertEqual({
          "optim/learning_rate:0": NodeStepper.FEED_TYPE_HANDLE
      }, stepper.last_feed_types())

      # Check that Variable "a" has been updated properly, but "b", "c" and "d"
      # remain the same.
      # For backprop on Variable a:
      #   Because f = a * b * b * c, df / da = b * b * c.
      #   1.0 - learning_rate * b * b * c
      #     = 1.0 -  0.01 * 2.0 * 2.0 * 4.0 = 0.84.
      self.assertAllClose(0.84, self.sess.run(self.a))
      self.assertAllClose(2.0, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

  def testContToUpdateB(self):
    """cont() to update_b applies the gradient step only to Variable b."""
    with NodeStepper(self.sess, "optim") as stepper:
      result = stepper.cont("optim/update_b/ApplyGradientDescent",
                            invalidate_from_updated_variables=True)
      self.assertIsNone(result)
      self.assertSetEqual({"b:0"}, stepper.last_updated())
      self.assertEqual(set(["b:0"]), stepper.dirty_variables())

      # For backprop on Variable b:
      #   Because f = a * b * b * c, df / da = 2 * a * b * c.
      #   2.0 - learning_rate * 2 * a * b * c
      #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
      self.assertAllClose(1.0, self.sess.run(self.a))
      self.assertAllClose(1.84, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

  def testContAfterUpdateWithoutRestoringVariableValue(self):
    """A second update sees the updated value when a is not restored."""
    with NodeStepper(self.sess, "optim") as stepper:
      # First, update Variable a from 1.0 to 0.84.
      result = stepper.cont(
          "optim/update_a/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)
      self.assertIsNone(result)
      self.assertSetEqual({"a:0"}, stepper.last_updated())
      self.assertEqual(set(["a:0"]), stepper.dirty_variables())
      self.assertAllClose(0.84, self.sess.run(self.a))
      self.assertAllClose(2.0, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))
      # Tracking of the updated variables should have invalidated all
      # intermediate tensors downstream to a:0.
      self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
      self.assertNotIn("d:0", stepper.intermediate_tensor_names())

      # Second, update Variable b without the default restore_variable_values.
      result = stepper.cont(
          "optim/update_b/ApplyGradientDescent", restore_variable_values=False)
      self.assertIsNone(result)
      # For the backprop on Variable b under the updated value of a:
      #   2.0 - learning_rate * 2 * a' * b * c
      #     = 2.0 - 0.01 * 2 * 0.84 * 2.0 * 4.0 = 1.8656
      self.assertAllClose(0.84, self.sess.run(self.a))
      self.assertAllClose(1.8656, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

  def testContNotInvalidatingFromVariableUpdatesWorksForNextUpdate(self):
    """With invalidation off, stale intermediates feed the next update."""
    with NodeStepper(self.sess, "optim") as stepper:
      self.assertIsNone(stepper.cont(
          "optim/update_a/ApplyGradientDescent",
          invalidate_from_updated_variables=False))
      # Even though invalidate_from_updated_variables is set to False, dirty
      # variables should still have been tracked.
      self.assertSetEqual({"a:0"}, stepper.last_updated())
      self.assertEqual({"a:0"}, stepper.dirty_variables())
      self.assertIn("a/read:0", stepper.intermediate_tensor_names())
      self.assertIn("b/read:0", stepper.intermediate_tensor_names())
      self.assertIn("c/read:0", stepper.intermediate_tensor_names())
      self.assertIn("d:0", stepper.intermediate_tensor_names())
      self.assertIn("e:0", stepper.intermediate_tensor_names())
      self.assertIn("optim/learning_rate:0",
                    stepper.intermediate_tensor_names())
      self.assertNotIn("a:0", stepper.intermediate_tensor_names())
      self.assertNotIn("b:0", stepper.intermediate_tensor_names())
      self.assertNotIn("c:0", stepper.intermediate_tensor_names())

      # The update to Variable a took effect in the session.
      self.assertAllClose(0.84, self.sess.run(self.a))
      self.assertAllClose(2.0, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

      # For the backprop on Variable b, the result should reflect the original
      # value of Variable a, even though Variable a has actually been updated.
      #   2.0 - learning_rate * 2 * a * b * c
      #     = 2.0 - 0.01 * 2 * 1.0 * 2.0 * 4.0 = 1.84
      self.assertIsNone(stepper.cont(
          "optim/update_b/ApplyGradientDescent",
          invalidate_from_updated_variables=False,
          restore_variable_values=False))
      self.assertAllClose(0.84, self.sess.run(self.a))
      self.assertAllClose(1.84, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

  def testUpdateTwiceRestoreVariable(self):
    """restore_variable_values makes consecutive updates independent."""
    with NodeStepper(self.sess, "optim") as stepper:
      result = stepper.cont(
          "optim/update_a/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)
      self.assertIsNone(result)
      self.assertSetEqual({"a:0"}, stepper.last_updated())
      self.assertEqual({"a:0"}, stepper.dirty_variables())

      result = stepper.cont(
          "optim/update_b/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)
      self.assertIsNone(result)
      # Variables a and c should have been restored and hence no longer dirty.
      # Variable b should have been marked as dirty.
      self.assertSetEqual({"b:0"}, stepper.last_updated())
      self.assertEqual({"b:0"}, stepper.dirty_variables())

      # The result of the update should be identitcal to as if only update_b is
      # run.
      self.assertAllClose(1.0, self.sess.run(self.a))
      self.assertAllClose(1.84, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

  def testSelectiveHandleUsageDependingOnTransitiveCleanliness(self):
    """Test tensor handlers are using only during clean transitive closure.

    "clean" means no Variables have been updated by preceding cont() calls.
    """
    with NodeStepper(self.sess, "optim") as stepper:
      # First, call cont() on the two tensors on the intermediate level: e and
      # f.
      result = stepper.cont("d:0")
      self.assertAllClose(2.0, result)
      self.assertEqual({}, stepper.last_feed_types())
      self.assertItemsEqual(["a/read:0", "b/read:0"],
                            stepper.intermediate_tensor_names())
      self.assertItemsEqual(["d:0"], stepper.handle_names())
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual(set(), stepper.dirty_variables())

      result = stepper.cont("e:0")
      self.assertAllClose(8.0, result)
      self.assertEqual({
          "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE
      }, stepper.last_feed_types())
      self.assertItemsEqual(["d:0", "e:0"], stepper.handle_names())
      self.assertItemsEqual(["a/read:0", "b/read:0", "c/read:0"],
                            stepper.intermediate_tensor_names())
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual(set(), stepper.dirty_variables())

      # Now run update_a, so as to let Variable a be dirty.
      result = stepper.cont(
          "optim/update_a/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)
      self.assertIsNone(result)
      # Due to the update to the value of a:0, the dumped intermediate a/read:0
      # should have been invalidated.
      self.assertNotIn("a/read:0", stepper.intermediate_tensor_names())
      self.assertSetEqual({"a:0"}, stepper.last_updated())
      self.assertEqual({"a:0"}, stepper.dirty_variables())

      # Now, run update_b.
      result = stepper.cont(
          "optim/update_b/ApplyGradientDescent", restore_variable_values=True)
      self.assertIsNone(result)

      # The last cont() run should have use the handle of tensor e, but not the
      # handle of tensor d, because the transitive closure of e is clean,
      # whereas that of d is dirty due to the update to a in the previous cont()
      # call.
      last_feed_types = stepper.last_feed_types()
      self.assertNotIn("d:0", last_feed_types)
      self.assertEqual(NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
                       last_feed_types["b/read:0"])
      self.assertEqual(NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
                       last_feed_types["c/read:0"])

      # The result of the update_b should be identical to as if no other
      # update_* cont() calls have occurred before.
      self.assertAllClose(1.0, self.sess.run(self.a))
      self.assertAllClose(1.84, self.sess.run(self.b))
      self.assertAllClose(4.0, self.sess.run(self.c))

  def testRestoreVariableValues(self):
    """Test restore_variable_values() restores the old values of variables."""
    with NodeStepper(self.sess, "optim") as stepper:
      stepper.cont(
          "optim/update_b/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)
      self.assertAllClose(1.84, self.sess.run(self.b))

      stepper.restore_variable_values()
      self.assertAllClose(2.0, self.sess.run(self.b))

  def testFinalize(self):
    """Test finalize() to restore variables and run the original fetch."""
    with NodeStepper(self.sess, "optim") as stepper:
      # Invoke update_b before calling finalize.
      stepper.cont(
          "optim/update_b/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)

      result = stepper.finalize()
      self.assertIsNone(result)

      # The results of the Variable updates should be the same as if no cont()
      # call has occurred on update_b.
      self.assertAllClose(0.84, self.sess.run(self.a))
      self.assertAllClose(1.84, self.sess.run(self.b))
      self.assertAllClose(3.96, self.sess.run(self.c))

  def testOverrideThenContToUpdateThenRemoveOverrideThenUpdateAgain(self):
    """Test cont() to update nodes after overriding tensor values."""
    with NodeStepper(self.sess, "optim") as stepper:
      result = stepper.cont("d:0")
      self.assertAllClose(2.0, result)
      self.assertEqual({}, stepper.last_feed_types())
      self.assertSetEqual(set(), stepper.last_updated())
      self.assertEqual(set(), stepper.dirty_variables())
      self.assertEqual(["d:0"], stepper.handle_names())
      self.assertSetEqual({"d"}, stepper.handle_node_names())

      # Override the value from 1.0 to 10.0.
      stepper.override_tensor("a/read:0", 10.0)

      self.assertEqual(["a/read:0"], stepper.override_names())

      result = stepper.cont(
          "optim/update_c/ApplyGradientDescent",
          invalidate_from_updated_variables=True,
          restore_variable_values=True)
      self.assertIsNone(result)

      # The last cont() call should have not used the tensor handle to d:0,
      # because the transitive closure of d:0 contains an override tensor.
      self.assertEqual({
          "a/read:0": NodeStepper.FEED_TYPE_OVERRIDE,
          "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())

      # The tensor handle to d:0 should have been removed due to the dirty
      # transitive closure.
      self.assertEqual([], stepper.handle_names())
      self.assertSetEqual(set(), stepper.handle_node_names())

      # For this backprop on c, the overriding value of a/read:0 should have
      # been used:
      #   4.0 - learning_rate * a * b * b
      #     = 4.0 - 0.01 * 10.0 * 2.0 * 2.0 = 3.6.
      self.assertAllClose(3.6, self.sess.run(self.c))

      # Now remove the overriding value of a/read:0.
      stepper.remove_override("a/read:0")
      self.assertEqual([], stepper.override_names())

      # Obtain the tensor handle to d:0 again.
      result = stepper.cont("d:0")
      self.assertAllClose(2.0, result)
      self.assertEqual(["d:0"], stepper.handle_names())
      self.assertSetEqual({"d"}, stepper.handle_node_names())
      self.assertNotIn("a/read:0", stepper.last_feed_types())

      # Then call update_c again, without restoring c.
      result = stepper.cont("optim/update_c/ApplyGradientDescent",
                            restore_variable_values=False)
      self.assertIsNone(result)
      self.assertNotIn("a/read:0", stepper.last_feed_types())

      # This time, the d:0 tensor handle should have been used, because its
      # transitive closure is clean.
      self.assertEqual({
          "b/read:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
          "d:0": NodeStepper.FEED_TYPE_HANDLE,
          "optim/learning_rate:0": NodeStepper.FEED_TYPE_DUMPED_INTERMEDIATE,
      }, stepper.last_feed_types())

      # For this backprop on c, the overriding value of a/read:0 should have
      # been used:
      #   3.6 - learning_rate * a * b * b
      #     = 3.6 - 0.01 * 1.0 * 2.0 * 2.0 = 3.56.
      self.assertAllClose(3.56, self.sess.run(self.c))

  def testContToNodeWithOutputTensors(self):
    """cont() to an op should cache its output tensors if appropriate."""
    with NodeStepper(self.sess, "optim") as stepper:
      # In the transitive closure of the stepper, look for an op of which the
      # output tensor also is in the transitive closure.
      # Do not assume a specific op, e.g., ""gradients/e_grad/Reshape_1",
      # because it may vary between builds.
      closure_elements = stepper.closure_elements()
      op_with_output_in_closure = None
      for element_name in closure_elements:
        if element_name + ":0" in closure_elements:
          op_with_output_in_closure = str(element_name)
          break

      # NOTE(review): this not-None assertion runs after the value has
      # already been used by the assertEqual above.
      self.assertEqual(
          [0], stepper.output_slots_in_closure(op_with_output_in_closure))

      self.assertIsNotNone(op_with_output_in_closure)
      output_tensor = op_with_output_in_closure + ":0"

      # The op "gradients/?_grad/Reshape_1" is in the transitive closure of the
      # stepper, because it is the control input to another op. However, its
      # output tensor "gradients/?_grad/Reshape_1:0" is also in the transitive
      # closure, because it is the (non-control) input of certain ops. Calling
      # cont() on the op should lead to the caching of the tensor handle for
      # the output tensor.
      stepper.cont(op_with_output_in_closure)

      self.assertEqual([output_tensor], stepper.handle_names())
      self.assertSetEqual({op_with_output_in_closure},
                          stepper.handle_node_names())

      # Do a cont() call that uses the cached tensor of
      # "gradients/?_grad/Reshape_1:0".
      stepper.cont(output_tensor)
      self.assertEqual({
          output_tensor: NodeStepper.FEED_TYPE_HANDLE
      }, stepper.last_feed_types())
if __name__ == "__main__":
  # Run all test cases in this module under TensorFlow's googletest runner.
  googletest.main()
| apache-2.0 |
wenqf11/huhamhire-hosts | tui/curses_ui.py | 24 | 23013 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# curses_ui.py: Draw TUI using curses.
#
# Copyleft (C) 2014 - huhamhire <me@huhamhire.com>
# =====================================================================
# Licensed under the GNU General Public License, version 3. You should
# have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# =====================================================================
__author__ = "huhamhire <me@huhamhire.com>"
import curses
import locale
import sys
sys.path.append("..")
from util import CommonUtil
from __version__ import __version__, __release__
class CursesUI(object):
"""
CursesUI class contains methods to draw the Text-based User Interface
(TUI) of Hosts Setup Utility. The methods to make TUI here are based on
`curses <http://docs.python.org/2/library/curses.html>`_.
:ivar str __title: Title of the TUI utility.
:ivar str __copyleft: Copyleft information of the TUI utility.
:ivar WindowObject _stdscr: A **WindowObject** which represents the whole
screen.
:ivar int _item_sup: Upper bound of item index from `function selection
list`.
:ivar int _item_inf: Lower bound of item index from `function selection
list`.
:ivar str _make_path: Temporary path to store the hosts file in while
building. The default _make_path is `./hosts`.
:ivar list _funcs: Two lists with the information of function list both
for IPv4 and IPv6 environment.
:ivar list choice: Two lists with the selection of functions both
for IPv4 and IPv6 environment.
:ivar list slices: Two lists with integers indicating the number of
function items from different parts listed in the function list.
.. seealso:: `_funcs`, `choice`, and `slices` in
:class:`~gui.qdialog_d.QDialogDaemon` class.
:ivar str sys_eol: The End-Of-Line marker. This maker could typically be
one of `CR`, `LF`, or `CRLF`.
.. seealso:: :attr:`sys_eol` in
:class:`~gui.qdialog_d.QDialogDaemon` class.
:ivar list colorpairs: Tuples of `(foreground-color, background-color)`
used while drawing TUI.
The default colorpairs is defined as::
colorpairs = [(curses.COLOR_WHITE, curses.COLOR_BLUE),
(curses.COLOR_WHITE, curses.COLOR_RED),
(curses.COLOR_YELLOW, curses.COLOR_BLUE),
(curses.COLOR_BLUE, curses.COLOR_WHITE),
(curses.COLOR_WHITE, curses.COLOR_WHITE),
(curses.COLOR_BLACK, curses.COLOR_WHITE),
(curses.COLOR_GREEN, curses.COLOR_WHITE),
(curses.COLOR_WHITE, curses.COLOR_BLACK),
(curses.COLOR_RED, curses.COLOR_WHITE), ]
:ivar list settings: Two list containing the server selection and IP
protocol version of current session.
The settings should be like::
settings = [["Server", 0, []],
["IP Version", 0, ["IPv4", "IPv6"]]]
:ivar list funckeys: Lists of hot keys with their function to be shown on
TUI.
The default :attr:`funckeys` is defined as::
funckeys = [["", "Select Item"], ["Tab", "Select Field"],
["Enter", "Set Item"], ["F5", "Check Update"],
["F6", "Fetch Update"], ["F10", "Apply Changes"],
["Esc", "Exit"]]
:ivar list statusinfo: Two lists containing the connection and OS checking
status of current session.
The default :attr:`statusinfo` is defined as::
statusinfo = [["Connection", "N/A", "GREEN"],
["OS", "N/A", "GREEN"]]
:ivar dict hostsinfo: Containing the `Version`, `Release Date` of current
hosts data file and the `Latest Version` of the data file on server.
The default hostsinfo is defined as::
hostsinfo = {"Version": "N/A", "Release": "N/A", "Latest": "N/A"}
.. note:: IF the hosts data file does NOT exist in current working
directory, OR the file metadata has NOT been checked, the values
here would just be `N/A`.
:ivar str filename: Filename of the hosts data file containing data to
make hosts files from. Default by "`hostslist.data`".
:ivar str infofile: Filename of the info file containing metadata of the
hosts data file formatted in JSON. Default by "`hostsinfo.json`".
.. seealso:: :attr:`filename` and :attr:`infofile` in
:class:`~gui.hostsutil.HostsUtil` class.
:ivar str custom: File name of User Customized Hosts File. Customized
hosts would be able to select if this file exists. The default file
name is ``custom.hosts``.
.. seealso:: :ref:`User Customized Hosts<intro-customize>`.
"""
# Banner title (name-mangled; rendered centered by banner()).
__title = "HOSTS SETUP UTILITY"
version = "".join(['v', __version__, ' ', __release__])
# Copyleft line rendered by footer().
__copyleft = "%s Copyleft 2011-2014, huhamhire-hosts Team" % version

# Curses WindowObject for the whole screen; created in __init__.
_stdscr = None
# Upper/lower bounds of the function-list viewport (item indices).
_item_sup = 0
_item_inf = 0
# Temporary path the generated hosts file is written to.
_make_path = "./hosts"
# Function metadata and selection state; index 0 is IPv4, index 1 is IPv6.
_funcs = [[], []]
choice = [[], []]
slices = [[], []]
# End-of-line marker used when writing the hosts file (CR, LF, or CRLF).
sys_eol = ""

# (foreground, background) pairs registered with curses in __init__;
# referenced throughout via curses.color_pair(index + 1).
colorpairs = [(curses.COLOR_WHITE, curses.COLOR_BLUE),
              (curses.COLOR_WHITE, curses.COLOR_RED),
              (curses.COLOR_YELLOW, curses.COLOR_BLUE),
              (curses.COLOR_BLUE, curses.COLOR_WHITE),
              (curses.COLOR_WHITE, curses.COLOR_WHITE),
              (curses.COLOR_BLACK, curses.COLOR_WHITE),
              (curses.COLOR_GREEN, curses.COLOR_WHITE),
              (curses.COLOR_WHITE, curses.COLOR_BLACK),
              (curses.COLOR_RED, curses.COLOR_WHITE), ]
# Current session settings: [label, selected index, options].
settings = [["Server", 0, []],
            ["IP Version", 0, ["IPv4", "IPv6"]]]
# Hot keys and their labels shown in the TUI.
funckeys = [["", "Select Item"], ["Tab", "Select Field"],
            ["Enter", "Set Item"], ["F5", "Check Update"],
            ["F6", "Fetch Update"], ["F10", "Apply Changes"],
            ["Esc", "Exit"]]
# Connection/OS check results: [label, value, color tag].
statusinfo = [["Connection", "N/A", "GREEN"], ["OS", "N/A", "GREEN"]]
# Metadata of the local hosts data file; "N/A" until checked.
hostsinfo = {"Version": "N/A", "Release": "N/A", "Latest": "N/A"}
# Data/info/custom-hosts file names used by the utility.
filename = "hostslist.data"
infofile = "hostsinfo.json"
custom = "custom.hosts"
def __init__(self):
    """
    Set up the terminal for a new curses TUI session and register
    the color pairs used by the drawing methods.
    """
    locale.setlocale(locale.LC_ALL, '')
    self._stdscr = curses.initscr()
    curses.start_color()
    curses.noecho()
    curses.cbreak()
    curses.curs_set(0)
    # Register colors; curses pair ids start at 1 (0 is reserved).
    curses.use_default_colors()
    for pair_id, (foreground, background) in enumerate(self.colorpairs, 1):
        curses.init_pair(pair_id, foreground, background)
def __del__(self):
    """
    Reset terminal before quit.
    """
    # NOTE(review): cleanup via __del__ is best-effort -- it only runs if
    # the interpreter finalizes the object; an explicit shutdown path
    # would be more reliable. Confirm callers do not depend on this.
    curses.nocbreak()
    curses.echo()
    curses.endwin()
def banner(self):
    """
    Render the two-line banner at the top of the screen: a bold
    centered title line and a "Setup" tab label beneath it.
    """
    banner_win = self._stdscr.subwin(2, 80, 0, 0)
    banner_win.bkgd(' ', curses.color_pair(1))
    # Attributes: bold title, color-pair 4 for the tab label.
    title_attr = curses.A_NORMAL | curses.A_BOLD
    tab_attr = curses.color_pair(4)
    banner_win.addstr(0, 0, self.__title.center(79), title_attr)
    banner_win.addstr(1, 0, "Setup".center(10), tab_attr)
    banner_win.refresh()
def footer(self):
    """
    Render the one-line copyleft footer at the bottom of the screen.
    """
    footer_win = self._stdscr.subwin(1, 80, 23, 0)
    footer_win.bkgd(' ', curses.color_pair(1))
    # Centered copyright line in the default attribute.
    footer_win.addstr(0, 0, self.__copyleft.center(79), curses.A_NORMAL)
    footer_win.refresh()
def configure_settings_frame(self, pos=None):
    """
    Draw `Configure Setting` frame with a index number (`pos`) of the item
    selected.

    :param pos: Index of selected item in `Configure Setting` frame. The
        default value of `pos` is `None`.
    :type pos: int or None

    .. note:: None of the items in `Configure Setting` frame would be
        selected if pos is `None`.
    """
    self._stdscr.keypad(1)
    screen = self._stdscr.subwin(8, 25, 2, 0)
    screen.bkgd(' ', curses.color_pair(4))
    # Set local variable
    normal = curses.A_NORMAL
    select = curses.color_pair(5)
    select += curses.A_BOLD
    for p, item in enumerate(self.settings):
        item_str = item[0].ljust(12)
        screen.addstr(3 + p, 2, item_str, select if p == pos else normal)
        if p:
            # Rows after the first ("IP Version"): options are plain
            # strings, so the selected option is shown directly.
            choice = "[%s]" % item[2][item[1]]
        else:
            # Row 0 ("Server"): options are dicts -- show the "label"
            # field of the selected mirror.
            choice = "[%s]" % item[2][item[1]]["label"]
        # Blank the value cell first, since option widths differ.
        screen.addstr(3 + p, 15, ''.ljust(10), normal)
        screen.addstr(3 + p, 15, choice, select if p == pos else normal)
    screen.refresh()
def status(self):
"""
Draw `Status` frame and `Hosts File` frame in the TUI window.
"""
screen = self._stdscr.subwin(11, 25, 10, 0)
screen.bkgd(' ', curses.color_pair(4))
# Set local variable
normal = curses.A_NORMAL
green = curses.color_pair(7)
red = curses.color_pair(9)
# Status info
for i, stat in enumerate(self.statusinfo):
screen.addstr(2 + i, 2, stat[0], normal)
stat_str = ''.join(['[', stat[1], ']']).ljust(9)
screen.addstr(2 + i, 15, stat_str,
green if stat[2] == "GREEN" else red)
# Hosts file info
i = 0
for key, info in self.hostsinfo.items():
screen.addstr(7 + i, 2, key, normal)
screen.addstr(7 + i, 15, info, normal)
i += 1
screen.refresh()
    def show_funclist(self, pos, item_sup, item_inf):
        """
        Draw `function selection list` frame with a index number of the item
        selected and the range of items to be displayed.
        :param pos: Index of selected item in `function selection list`.
        :type pos: int or None
        :param item_sup: Upper bound of item index from `function selection
            list`.
        :type item_sup: int
        :param item_inf: Lower bound of item index from `function selection
            list`.
        :type item_inf: int
        """
        # Set UI variable
        screen = self._stdscr.subwin(18, 26, 2, 26)
        screen.bkgd(' ', curses.color_pair(4))
        normal = curses.A_NORMAL
        select = curses.color_pair(5)
        select += curses.A_BOLD
        list_height = 15
        # Set local variable
        # ip is the currently selected IP-version setting; it selects which
        # function list (self.choice[ip]) and selection flags to show.
        ip = self.settings[1][1]
        item_len = len(self.choice[ip])
        # Function list
        items_show = self.choice[ip][item_sup:item_inf]
        items_selec = self._funcs[ip][item_sup:item_inf]
        for p, item in enumerate(items_show):
            sel_ch = '+' if items_selec[p] else ' '
            item_str = ("[%s] %s" % (sel_ch, item[3])).ljust(23)
            item_pos = pos - item_sup if pos is not None else None
            highlight = select if p == item_pos else normal
            # NOTE(review): the first two branches and the last two branches
            # draw the identical string at the same row; only the row offset
            # (4+p with a "More" header above, 3+p without) differs.
            # NOTE(review): when item_len <= list_height no item rows are
            # drawn inside this loop at all -- the upstream version draws
            # them in an outer else; verify against the original source.
            if item_len > list_height:
                if item_inf - item_sup == list_height - 2:
                    screen.addstr(4 + p, 2, item_str, highlight)
                elif item_inf == item_len:
                    screen.addstr(4 + p, 2, item_str, highlight)
                elif item_sup == 0:
                    screen.addstr(3 + p, 2, item_str, highlight)
                else:
                    screen.addstr(3 + p, 2, item_str, highlight)
        # Draw the "More" scroll indicators depending on whether items are
        # hidden above, below, or on both sides of the visible window.
        if item_len > list_height:
            if item_inf - item_sup == list_height - 2:
                screen.addstr(3, 2, " More ".center(23, '.'), normal)
                screen.addch(3, 15, curses.ACS_UARROW)
                screen.addstr(17, 2, " More ".center(23, '.'), normal)
                screen.addch(17, 15, curses.ACS_DARROW)
            elif item_inf == item_len:
                screen.addstr(3, 2, " More ".center(23, '.'), normal)
                screen.addch(3, 15, curses.ACS_UARROW)
            elif item_sup == 0:
                screen.addstr(17, 2, " More ".center(23, '.'), normal)
                screen.addch(17, 15, curses.ACS_DARROW)
        else:
            # Short list: blank the unused rows at the bottom of the frame.
            for line_i in range(list_height - item_len):
                screen.addstr(17 - line_i, 2, ' ' * 23, normal)
        if not items_show:
            screen.addstr(4, 2, "No data file!".center(23), normal)
        screen.refresh()
        # Remember the displayed window so key handlers can scroll it.
        self._item_sup, self._item_inf = item_sup, item_inf
    def info(self, pos, tab):
        """
        Draw `Information` frame with a index number (`pos`) of the item
        selected from the frame specified by `tab`.
        :param pos: Index of selected item in a specified frame.
        :type pos: int
        :param tab: Index of the frame to select items from (truthy selects
            the function list, falsy the settings frame).
        :type tab: int
        .. warning:: Both of `pos` and `tab` in this method could not be
            set to `None`.
        """
        screen = self._stdscr.subwin(18, 24, 2, 52)
        screen.bkgd(' ', curses.color_pair(4))
        normal = curses.A_NORMAL
        if tab:
            # Describe the selected function of the active IP version.
            ip = self.settings[1][1]
            info_str = self.choice[ip][pos][3]
        else:
            info_str = self.settings[pos][0]
        # Clear expired information
        for i in range(6):
            screen.addstr(1 + i, 2, ''.ljust(22), normal)
        screen.addstr(1, 2, info_str, normal)
        # Key Info Offset
        k_info_y = 10
        k_info_x_key = 2
        k_info_x_text = 10
        # Arrow Keys
        screen.addch(k_info_y, k_info_x_key, curses.ACS_UARROW, normal)
        screen.addch(k_info_y, k_info_x_key + 1, curses.ACS_DARROW, normal)
        # Show Key Info
        for i, keyinfo in enumerate(self.funckeys):
            screen.addstr(k_info_y + i, k_info_x_key, keyinfo[0], normal)
            screen.addstr(k_info_y + i, k_info_x_text, keyinfo[1], normal)
        screen.refresh()
    def process_bar(self, done, block, total, mode=1):
        """
        Draw `Process Bar` at the bottom which is used to indicate progress of
        downloading operation.
        .. note:: This method is a callback function responses to
            :meth:`urllib.urlretrieve` method while fetching hosts data
            file.
        :param done: Block count of packaged retrieved.
        :type done: int
        :param block: Block size of the data pack retrieved.
        :type block: int
        :param total: Total size of the hosts data file.
        :type total: int
        :param mode: A flag indicating the status of `Process Bar`.
            The default value of `mode` is `1`.
            ==== ====================================
            mode `Process Bar` status
            ==== ====================================
            1    Downloading operation is processing.
            0    Not downloading.
            ==== ====================================
        :type mode: int
        """
        screen = self._stdscr.subwin(2, 80, 20, 0)
        screen.bkgd(' ', curses.color_pair(4))
        normal = curses.A_NORMAL
        line_width = 76
        prog_len = line_width - 20
        # Progress Bar
        if mode:
            # urlretrieve reports a block count; convert to bytes.
            done *= block
            # NOTE(review): raises ZeroDivisionError if total == 0 -- this
            # presumably never happens for a real download; confirm.
            prog = 1.0 * prog_len * done / total
            # '=' for each full cell, plus a '-' for a half-filled cell.
            progress = ''.join(['=' * int(prog), '-' * int(2 * prog % 2)])
            progress = progress.ljust(prog_len)
            total = CommonUtil.convert_size(total).ljust(7)
            done = CommonUtil.convert_size(done).rjust(7)
        else:
            progress = ' ' * prog_len
            done = total = 'N/A'.center(7)
        # Show Progress
        prog_bar = "[%s] %s | %s" % (progress, done, total)
        screen.addstr(1, 2, prog_bar, normal)
        screen.refresh()
def sub_selection_dialog(self, pos):
"""
Draw a `Selection Dialog` on screen used to make configurations.
:param pos: Index of selected item in `Configure Setting` frame.
:type pos: int
.. warning:: The value of `pos` MUST NOT be `None`.
:return: A **WindowObject** which represents the selection dialog.
:rtype: WindowObject
"""
i_len = len(self.settings[pos][2])
# Draw Shadow
shadow = curses.newwin(i_len + 2, 18, 13 - i_len / 2, 31)
shadow.bkgd(' ', curses.color_pair(8))
shadow.refresh()
# Draw Subwindow
screen = curses.newwin(i_len + 2, 18, 12 - i_len / 2, 30)
screen.box()
screen.bkgd(' ', curses.color_pair(1))
screen.keypad(1)
# Set local variable
normal = curses.A_NORMAL
# Title of Subwindow
screen.addstr(0, 3, self.settings[pos][0].center(12), normal)
return screen
def sub_selection_dialog_items(self, pos, i_pos, screen):
"""
Draw items in `Selection Dialog`.
:param pos: Index of selected item in `Configure Setting` frame.
:type pos: int
:param i_pos: Index of selected item in `Selection Dialog`.
:type i_pos: int
:param screen: A **WindowObject** which represents the selection
dialog.
:type screen: WindowObject
"""
# Set local variable
normal = curses.A_NORMAL
select = normal + curses.A_BOLD
for p, item in enumerate(self.settings[pos][2]):
item_str = item if pos else item["tag"]
screen.addstr(1 + p, 2, item_str,
select if p == i_pos else normal)
screen.refresh()
    def setup_menu(self):
        """
        Draw the main frame of `Setup` tab in the TUI window.

        The frame is a boxed 21x80 region split into four sections
        (settings, status, function list, progress bar) by vertical and
        horizontal lines; the ACS tee characters join the dividers to the
        outer box.
        """
        screen = self._stdscr.subwin(21, 80, 2, 0)
        screen.box()
        screen.bkgd(' ', curses.color_pair(4))
        # Configuration Section
        screen.addch(0, 26, curses.ACS_BSSS)
        screen.vline(1, 26, curses.ACS_VLINE, 17)
        # Status Section
        screen.addch(7, 0, curses.ACS_SSSB)
        screen.addch(7, 26, curses.ACS_SBSS)
        screen.hline(7, 1, curses.ACS_HLINE, 25)
        # Select Functions Section
        screen.addch(0, 52, curses.ACS_BSSS)
        screen.vline(1, 52, curses.ACS_VLINE, 17)
        # Process Bar Section
        screen.addch(18, 0, curses.ACS_SSSB)
        screen.addch(18, 79, curses.ACS_SBSS)
        screen.hline(18, 1, curses.ACS_HLINE, 78)
        screen.addch(18, 26, curses.ACS_SSBS)
        screen.addch(18, 52, curses.ACS_SSBS)
        # Section Titles
        title = curses.color_pair(6)
        subtitles = [["Configure Settings", (1, 2)], ["Status", (8, 2)],
                     ["Hosts File", (13, 2)], ["Select Functions", (1, 28)]]
        # Each title is underlined with a horizontal rule on the next row.
        for s_title in subtitles:
            cord = s_title[1]
            screen.addstr(cord[0], cord[1], s_title[0], title)
            screen.hline(cord[0] + 1, cord[1], curses.ACS_HLINE, 23)
        screen.refresh()
@staticmethod
def messagebox(msg, mode=0):
"""
Draw a `Message Box` with :attr:`msg` in a specified type defined by
:attr:`mode`.
.. note:: This is a `static` method.
:param msg: The information to be displayed in message box.
:type msg: str
:param mode: A flag indicating the type of message box to be
displayed. The default value of `mode` is `0`.
==== ===========================================================
mode Message Box Type
==== ===========================================================
0 A simple message box showing a message without any buttons.
1 A message box with an `OK` button for user to confirm.
2 A message box with `OK` and `Cancel` buttons for user to
choose.
==== ===========================================================
:type mode: int
:return: A flag indicating the choice made by user.
====== =====================================================
Return Condition
====== =====================================================
0 #. No button is pressed while :attr:`mode` is `0`.
#. `OK` button pressed while :attr:`mode` is `1`.
#. `Cancel` button pressed while :attr:`mode` is `2`.
1 `OK` button pressed while :attr:`mode` is `2`.
====== =====================================================
:rtype: int
"""
pos_x = 24 if mode == 0 else 20
pos_y = 10
width = 30 if mode == 0 else 40
height = 2
messages = CommonUtil.cut_message(msg, width - 4)
height += len(messages)
if mode:
height += 2
# Draw Shadow
shadow = curses.newwin(height, width, pos_y + 1, pos_x + 1)
shadow.bkgd(' ', curses.color_pair(8))
shadow.refresh()
# Draw Subwindow
screen = curses.newwin(height, width, pos_y, pos_x)
screen.box()
screen.bkgd(' ', curses.color_pair(2))
screen.keypad(1)
# Set local variable
normal = curses.A_NORMAL
select = curses.A_REVERSE
# Insert messages
for i in range(len(messages)):
screen.addstr(1 + i, 2, messages[i].center(width - 4), normal)
if mode == 0:
screen.refresh()
else:
# Draw subwindow frame
line_height = 1 + len(messages)
screen.hline(line_height, 1, curses.ACS_HLINE, width - 2)
screen.addch(line_height, 0, curses.ACS_SSSB)
screen.addch(line_height, width - 1, curses.ACS_SBSS)
tab = 0
key_in = None
while key_in != 27:
if mode == 1:
choices = ["OK"]
elif mode == 2:
choices = ["OK", "Cancel"]
else:
return 0
for i, item in enumerate(choices):
item_str = ''.join(['[', item, ']'])
tab_pos_x = 6 + 20 * i if mode == 2 else 18
screen.addstr(line_height + 1, tab_pos_x, item_str,
select if i == tab else normal)
screen.refresh()
key_in = screen.getch()
if mode == 2:
# OK or Cancel
if key_in in [9, curses.KEY_LEFT, curses.KEY_RIGHT]:
tab = [1, 0][tab]
if key_in in [ord('a'), ord('c')]:
key_in -= (ord('a') - ord('A'))
if key_in in [ord('C'), ord('O')]:
return [ord('C'), ord('O')].index(key_in)
if key_in in [10, 32]:
return not tab
return 0
| gpl-3.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.8.0/Python-3.8.0/Lib/lib2to3/pytree.py | 33 | 27960 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""
Python parse tree definitions.
This is a very concrete parse tree; we need to keep every token and
even the comments and whitespace between tokens.
There's also a pattern matching implementation here.
"""
__author__ = "Guido van Rossum <guido@python.org>"
import sys
from io import StringIO
HUGE = 0x7FFFFFFF # maximum repeat count, default max
_type_reprs = {}
def type_repr(type_num):
    """Return the symbolic name for *type_num*, or the number itself.

    The reverse mapping (symbol number -> name) is built lazily on first
    use and cached in the module-level ``_type_reprs`` dict.
    """
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        _type_reprs = {val: name
                       for name, val in python_symbols.__dict__.items()
                       if type(val) == int}
    return _type_reprs.setdefault(type_num, type_num)
class Base(object):
    """
    Abstract base class for Node and Leaf.
    This provides some default functionality and boilerplate using the
    template pattern.
    A node may be a subnode of at most one parent.
    """
    # Default values for instance variables
    type = None # int: token number (< 256) or symbol number (>= 256)
    parent = None # Parent node pointer, or None
    children = () # Tuple of subnodes
    # was_changed is set by changed(); was_checked is not touched in this
    # module and is presumably maintained by the refactoring machinery.
    was_changed = False
    was_checked = False
    def __new__(cls, *args, **kwds):
        """Constructor that prevents Base from being instantiated."""
        assert cls is not Base, "Cannot instantiate Base"
        return object.__new__(cls)
    def __eq__(self, other):
        """
        Compare two nodes for equality.
        This calls the method _eq().
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self._eq(other)
    __hash__ = None # For Py3 compatibility.
    def _eq(self, other):
        """
        Compare two nodes for equality.
        This is called by __eq__ and __ne__. It is only called if the two nodes
        have the same type. This must be implemented by the concrete subclass.
        Nodes should be considered equal if they have the same structure,
        ignoring the prefix string and other context information.
        """
        raise NotImplementedError
    def clone(self):
        """
        Return a cloned (deep) copy of self.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def post_order(self):
        """
        Return a post-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def pre_order(self):
        """
        Return a pre-order iterator for the tree.
        This must be implemented by the concrete subclass.
        """
        raise NotImplementedError
    def replace(self, new):
        """Replace this node with a new one in the parent.

        *new* may be a single node or a list of nodes; the replacement is
        spliced into the parent's child list in place of self.
        """
        assert self.parent is not None, str(self)
        assert new is not None
        if not isinstance(new, list):
            new = [new]
        l_children = []
        found = False
        # Rebuild the parent's child list, substituting `new` for self.
        for ch in self.parent.children:
            if ch is self:
                assert not found, (self.parent.children, self, new)
                if new is not None:
                    l_children.extend(new)
                found = True
            else:
                l_children.append(ch)
        assert found, (self.children, self, new)
        self.parent.changed()
        self.parent.children = l_children
        for x in new:
            x.parent = self.parent
        self.parent = None
    def get_lineno(self):
        """Return the line number which generated the invocant node.

        Returns None for a childless interior node (no leaf to read from).
        """
        node = self
        while not isinstance(node, Leaf):
            if not node.children:
                return
            node = node.children[0]
        return node.lineno
    def changed(self):
        # Mark this node and all ancestors as modified.
        if self.parent:
            self.parent.changed()
        self.was_changed = True
    def remove(self):
        """
        Remove the node from the tree. Returns the position of the node in its
        parent's children before it was removed.

        Returns None (implicitly) if the node has no parent.
        """
        if self.parent:
            for i, node in enumerate(self.parent.children):
                if node is self:
                    self.parent.changed()
                    del self.parent.children[i]
                    self.parent = None
                    return i
    @property
    def next_sibling(self):
        """
        The node immediately following the invocant in their parent's children
        list. If the invocant does not have a next sibling, it is None
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                try:
                    return self.parent.children[i+1]
                except IndexError:
                    return None
    @property
    def prev_sibling(self):
        """
        The node immediately preceding the invocant in their parent's children
        list. If the invocant does not have a previous sibling, it is None.
        """
        if self.parent is None:
            return None
        # Can't use index(); we need to test by identity
        for i, child in enumerate(self.parent.children):
            if child is self:
                if i == 0:
                    return None
                return self.parent.children[i-1]
    def leaves(self):
        """Yield all leaf nodes of the subtree rooted here, in order."""
        for child in self.children:
            yield from child.leaves()
    def depth(self):
        """Return the distance from this node to the root of the tree."""
        if self.parent is None:
            return 0
        return 1 + self.parent.depth()
    def get_suffix(self):
        """
        Return the string immediately following the invocant node. This is
        effectively equivalent to node.next_sibling.prefix
        """
        next_sib = self.next_sibling
        if next_sib is None:
            return ""
        return next_sib.prefix
    # Python 2 shim; dead code on Python 3.  NOTE(review): as written,
    # str(self) here would recurse -- the Py2 original presumably called
    # unicode(self).encode("ascii"); verify before resurrecting.
    if sys.version_info < (3, 0):
        def __str__(self):
            return str(self).encode("ascii")
class Node(Base):
    """Concrete implementation for interior nodes."""
    def __init__(self,type, children,
                 context=None,
                 prefix=None,
                 fixers_applied=None):
        """
        Initializer.
        Takes a type constant (a symbol number >= 256), a sequence of
        child nodes, and an optional context keyword argument.
        As a side effect, the parent pointers of the children are updated.

        NOTE: `context` is accepted for signature symmetry with Leaf but
        is unused here; the prefix comes from the first child.
        """
        assert type >= 256, type
        self.type = type
        self.children = list(children)
        # Adopt each child; a child may belong to at most one parent.
        for ch in self.children:
            assert ch.parent is None, repr(ch)
            ch.parent = self
        if prefix is not None:
            self.prefix = prefix
        if fixers_applied:
            self.fixers_applied = fixers_applied[:]
        else:
            self.fixers_applied = None
    def __repr__(self):
        """Return a canonical string representation."""
        return "%s(%s, %r)" % (self.__class__.__name__,
                               type_repr(self.type),
                               self.children)
    def __unicode__(self):
        """
        Return a pretty string representation.
        This reproduces the input source exactly.
        """
        return "".join(map(str, self.children))
    if sys.version_info > (3, 0):
        __str__ = __unicode__
    def _eq(self, other):
        """Compare two nodes for equality."""
        return (self.type, self.children) == (other.type, other.children)
    def clone(self):
        """Return a cloned (deep) copy of self."""
        return Node(self.type, [ch.clone() for ch in self.children],
                    fixers_applied=self.fixers_applied)
    def post_order(self):
        """Return a post-order iterator for the tree."""
        for child in self.children:
            yield from child.post_order()
        yield self
    def pre_order(self):
        """Return a pre-order iterator for the tree."""
        yield self
        for child in self.children:
            yield from child.pre_order()
    @property
    def prefix(self):
        """
        The whitespace and comments preceding this node in the input.

        Delegates to the first child; an empty node has an empty prefix.
        """
        if not self.children:
            return ""
        return self.children[0].prefix
    @prefix.setter
    def prefix(self, prefix):
        # Setting the prefix of a childless node is silently a no-op.
        if self.children:
            self.children[0].prefix = prefix
    def set_child(self, i, child):
        """
        Equivalent to 'node.children[i] = child'. This method also sets the
        child's parent attribute appropriately.
        """
        child.parent = self
        self.children[i].parent = None
        self.children[i] = child
        self.changed()
    def insert_child(self, i, child):
        """
        Equivalent to 'node.children.insert(i, child)'. This method also sets
        the child's parent attribute appropriately.
        """
        child.parent = self
        self.children.insert(i, child)
        self.changed()
    def append_child(self, child):
        """
        Equivalent to 'node.children.append(child)'. This method also sets the
        child's parent attribute appropriately.
        """
        child.parent = self
        self.children.append(child)
        self.changed()
class Leaf(Base):
    """Concrete implementation for leaf nodes."""
    # Default values for instance variables
    _prefix = "" # Whitespace and comments preceding this token in the input
    lineno = 0 # Line where this token starts in the input
    column = 0 # Column where this token starts in the input
    def __init__(self, type, value,
                 context=None,
                 prefix=None,
                 fixers_applied=None):
        """
        Initializer.
        Takes a type constant (a token number < 256), a string value, and an
        optional context keyword argument.

        `context`, if given, is a (prefix, (lineno, column)) tuple; an
        explicit `prefix` overrides the one from `context`.
        """
        assert 0 <= type < 256, type
        if context is not None:
            self._prefix, (self.lineno, self.column) = context
        self.type = type
        self.value = value
        if prefix is not None:
            self._prefix = prefix
        # Copy the list so later mutation by the caller cannot affect this
        # node.  (Defaulting to None instead of [] avoids the shared
        # mutable-default-argument pitfall; the observable default -- an
        # independent empty list -- is unchanged.)
        self.fixers_applied = fixers_applied[:] if fixers_applied is not None else []
    def __repr__(self):
        """Return a canonical string representation."""
        return "%s(%r, %r)" % (self.__class__.__name__,
                               self.type,
                               self.value)
    def __unicode__(self):
        """
        Return a pretty string representation.
        This reproduces the input source exactly.
        """
        return self.prefix + str(self.value)
    if sys.version_info > (3, 0):
        __str__ = __unicode__
    def _eq(self, other):
        """Compare two nodes for equality."""
        return (self.type, self.value) == (other.type, other.value)
    def clone(self):
        """Return a cloned (deep) copy of self."""
        return Leaf(self.type, self.value,
                    (self.prefix, (self.lineno, self.column)),
                    fixers_applied=self.fixers_applied)
    def leaves(self):
        """A leaf's leaf iteration yields just itself."""
        yield self
    def post_order(self):
        """Return a post-order iterator for the tree."""
        yield self
    def pre_order(self):
        """Return a pre-order iterator for the tree."""
        yield self
    @property
    def prefix(self):
        """
        The whitespace and comments preceding this token in the input.
        """
        return self._prefix
    @prefix.setter
    def prefix(self, prefix):
        self.changed()
        self._prefix = prefix
def convert(gr, raw_node):
    """
    Convert raw node information to a Node or Leaf instance.
    This is passed to the parser driver which calls it whenever a reduction of a
    grammar rule produces a new complete node, so that the tree is build
    strictly bottom-up.
    """
    node_type, value, context, children = raw_node
    # A childless token that is not a grammar symbol becomes a Leaf.
    if not children and node_type not in gr.number2symbol:
        return Leaf(node_type, value, context=context)
    # If there's exactly one child, return that child instead of
    # creating a new node.
    if len(children) == 1:
        return children[0]
    return Node(node_type, children, context=context)
class BasePattern(object):
    """
    A pattern is a tree matching pattern.
    It looks for a specific node type (token or symbol), and
    optionally for a specific content.
    This is an abstract base class. There are three concrete
    subclasses:
    - LeafPattern matches a single leaf node;
    - NodePattern matches a single node (usually non-leaf);
    - WildcardPattern matches a sequence of nodes of variable length.
    """
    # Defaults for instance variables
    type = None # Node type (token if < 256, symbol if >= 256)
    content = None # Optional content matching pattern
    name = None # Optional name used to store match in results dict
    def __new__(cls, *args, **kwds):
        """Constructor that prevents BasePattern from being instantiated."""
        assert cls is not BasePattern, "Cannot instantiate BasePattern"
        return object.__new__(cls)
    def __repr__(self):
        # Drop trailing None arguments for a compact representation.
        args = [type_repr(self.type), self.content, self.name]
        while args and args[-1] is None:
            del args[-1]
        return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
    def optimize(self):
        """
        A subclass can define this as a hook for optimizations.
        Returns either self or another node with the same effect.
        """
        return self
    def match(self, node, results=None):
        """
        Does this pattern exactly match a node?
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        Default implementation for non-wildcard patterns.
        """
        if self.type is not None and node.type != self.type:
            return False
        if self.content is not None:
            r = None
            if results is not None:
                r = {}
            # _submatch is provided by the concrete subclass.
            if not self._submatch(node, r):
                return False
            if r:
                results.update(r)
        if results is not None and self.name:
            results[self.name] = node
        return True
    def match_seq(self, nodes, results=None):
        """
        Does this pattern exactly match a sequence of nodes?
        Default implementation for non-wildcard patterns.
        """
        if len(nodes) != 1:
            return False
        return self.match(nodes[0], results)
    def generate_matches(self, nodes):
        """
        Generator yielding all matches for this pattern.
        Default implementation for non-wildcard patterns.

        Yields at most one (1, results) pair: a non-wildcard pattern can
        only consume exactly one node from the front of the sequence.
        """
        r = {}
        if nodes and self.match(nodes[0], r):
            yield 1, r
class LeafPattern(BasePattern):
    def __init__(self, type=None, content=None, name=None):
        """
        Initializer. Takes optional type, content, and name.
        The type, if given must be a token type (< 256). If not given,
        this matches any *leaf* node; the content may still be required.
        The content, if given, must be a string.
        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert 0 <= type < 256, type
        if content is not None:
            assert isinstance(content, str), repr(content)
        self.type = type
        self.content = content
        self.name = name
    def match(self, node, results=None):
        """Override match() to insist on a leaf node."""
        if not isinstance(node, Leaf):
            return False
        return BasePattern.match(self, node, results)
    def _submatch(self, node, results=None):
        """
        Match the pattern's content to the node's children.
        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        When returning False, the results dict may still be updated.

        For a leaf this is simply a string comparison against the token
        value; `results` is never touched here.
        """
        return self.content == node.value
class NodePattern(BasePattern):
    # True as soon as any subpattern in `content` is a WildcardPattern;
    # switches _submatch() to the (more expensive) generate_matches path.
    wildcards = False
    def __init__(self, type=None, content=None, name=None):
        """
        Initializer. Takes optional type, content, and name.
        The type, if given, must be a symbol type (>= 256). If the
        type is None this matches *any* single node (leaf or not),
        except if content is not None, in which it only matches
        non-leaf nodes that also match the content pattern.
        The content, if not None, must be a sequence of Patterns that
        must match the node's children exactly. If the content is
        given, the type must not be None.
        If a name is given, the matching node is stored in the results
        dict under that key.
        """
        if type is not None:
            assert type >= 256, type
        if content is not None:
            assert not isinstance(content, str), repr(content)
            content = list(content)
            for i, item in enumerate(content):
                assert isinstance(item, BasePattern), (i, item)
                if isinstance(item, WildcardPattern):
                    self.wildcards = True
        self.type = type
        self.content = content
        self.name = name
    def _submatch(self, node, results=None):
        """
        Match the pattern's content to the node's children.
        This assumes the node type matches and self.content is not None.
        Returns True if it matches, False if not.
        If results is not None, it must be a dict which will be
        updated with the nodes matching named subpatterns.
        When returning False, the results dict may still be updated.
        """
        if self.wildcards:
            # Wildcard subpatterns can consume a variable number of
            # children; accept only matches covering ALL children.
            for c, r in generate_matches(self.content, node.children):
                if c == len(node.children):
                    if results is not None:
                        results.update(r)
                    return True
            return False
        # No wildcards: children must pair off 1:1 with subpatterns.
        if len(self.content) != len(node.children):
            return False
        for subpattern, child in zip(self.content, node.children):
            if not subpattern.match(child, results):
                return False
        return True
class WildcardPattern(BasePattern):
    """
    A wildcard pattern can match zero or more nodes.
    This has all the flexibility needed to implement patterns like:
    .* .+ .? .{m,n}
    (a b c | d e | f)
    (...)* (...)+ (...)? (...){m,n}
    except it always uses non-greedy matching.
    """
    def __init__(self, content=None, min=0, max=HUGE, name=None):
        """
        Initializer.
        Args:
            content: optional sequence of subsequences of patterns;
                if absent, matches one node;
                if present, each subsequence is an alternative [*]
            min: optional minimum number of times to match, default 0
            max: optional maximum number of times to match, default HUGE
            name: optional name assigned to this match
        [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
            equivalent to (a b c | d e | f g h); if content is None,
            this is equivalent to '.' in regular expression terms.
        The min and max parameters work as follows:
            min=0, max=maxint: .*
            min=1, max=maxint: .+
            min=0, max=1: .?
            min=1, max=1: .
        If content is not None, replace the dot with the parenthesized
        list of alternatives, e.g. (a b c | d e | f g h)*
        """
        assert 0 <= min <= max <= HUGE, (min, max)
        if content is not None:
            content = tuple(map(tuple, content)) # Protect against alterations
            # Check sanity of alternatives
            assert len(content), repr(content) # Can't have zero alternatives
            for alt in content:
                assert len(alt), repr(alt) # Each alternative must be non-empty
        self.content = content
        self.min = min
        self.max = max
        self.name = name
    def optimize(self):
        """Optimize certain stacked wildcard patterns."""
        subpattern = None
        # A single alternative containing a single pattern can sometimes
        # be collapsed into that pattern directly.
        if (self.content is not None and
            len(self.content) == 1 and len(self.content[0]) == 1):
            subpattern = self.content[0][0]
        if self.min == 1 and self.max == 1:
            if self.content is None:
                return NodePattern(name=self.name)
            if subpattern is not None and self.name == subpattern.name:
                return subpattern.optimize()
        # Collapse nested optional wildcards: (x?)? and friends multiply
        # their min/max bounds instead of nesting.
        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
            subpattern.min <= 1 and self.name == subpattern.name):
            return WildcardPattern(subpattern.content,
                                   self.min*subpattern.min,
                                   self.max*subpattern.max,
                                   subpattern.name)
        return self
    def match(self, node, results=None):
        """Does this pattern exactly match a node?"""
        return self.match_seq([node], results)
    def match_seq(self, nodes, results=None):
        """Does this pattern exactly match a sequence of nodes?"""
        for c, r in self.generate_matches(nodes):
            if c == len(nodes):
                if results is not None:
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False
    def generate_matches(self, nodes):
        """
        Generator yielding matches for a sequence of nodes.
        Args:
            nodes: sequence of nodes
        Yields:
            (count, results) tuples where:
            count: the match comprises nodes[:count];
            results: dict containing named submatches.
        """
        if self.content is None:
            # Shortcut for special case (see __init__.__doc__)
            for count in range(self.min, 1 + min(len(nodes), self.max)):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        elif self.name == "bare_name":
            # Special-cased fast path used by the fixer pattern compiler.
            yield self._bare_name_matches(nodes)
        else:
            # The reason for this is that hitting the recursion limit usually
            # results in some ugly messages about how RuntimeErrors are being
            # ignored. We only have to do this on CPython, though, because other
            # implementations don't have this nasty bug in the first place.
            if hasattr(sys, "getrefcount"):
                save_stderr = sys.stderr
                sys.stderr = StringIO()
            try:
                for count, r in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            except RuntimeError:
                # We fall back to the iterative pattern matching scheme if the recursive
                # scheme hits the recursion limit.
                for count, r in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            finally:
                if hasattr(sys, "getrefcount"):
                    sys.stderr = save_stderr
    def _iterative_matches(self, nodes):
        """Helper to iteratively yield the matches."""
        nodelen = len(nodes)
        if 0 >= self.min:
            yield 0, {}
        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))
        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results
    def _bare_name_matches(self, nodes):
        """Special optimized matcher for bare_name.

        Greedily consumes nodes from the front as long as any leaf
        subpattern matches; returns a single (count, results) pair.
        """
        count = 0
        r = {}
        done = False
        max = len(nodes)
        while not done and count < max:
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        r[self.name] = nodes[:count]
        return count, r
    def _recursive_matches(self, nodes, count):
        """Helper to recursively yield the matches."""
        assert self.content is not None
        if count >= self.min:
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r
class NegatedPattern(BasePattern):
    def __init__(self, content=None):
        """
        Initializer.
        The argument is either a pattern or None. If it is None, this
        only matches an empty sequence (effectively '$' in regex
        lingo). If it is not None, this matches whenever the argument
        pattern doesn't have any matches.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content
    def match(self, node):
        """A negated pattern never matches a single node in its entirety."""
        return False
    def match_seq(self, nodes):
        """Only the empty sequence is matched in its entirety."""
        return not nodes
    def generate_matches(self, nodes):
        if self.content is None:
            # Bare negation behaves like '$': match only at end of input.
            if not nodes:
                yield 0, {}
            return
        # Succeed (with a zero-width match) iff the wrapped pattern has
        # no matches at this position.
        for _ in self.content.generate_matches(nodes):
            return
        yield 0, {}
def generate_matches(patterns, nodes):
    """
    Generator yielding matches for a sequence of patterns and nodes.
    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes
    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    # An empty pattern sequence matches zero nodes, once.
    if not patterns:
        yield 0, {}
        return
    first, rest = patterns[0], patterns[1:]
    for c0, r0 in first.generate_matches(nodes):
        if not rest:
            yield c0, r0
            continue
        # Recursively match the remaining patterns against what is left
        # of the node sequence, merging the named submatches.
        for c1, r1 in generate_matches(rest, nodes[c0:]):
            merged = dict(r0)
            merged.update(r1)
            yield c0 + c1, merged
| apache-2.0 |
beiko-lab/gengis | bin/Lib/site-packages/numpy/lib/utils.py | 1 | 38119 | import os
import sys
import types
import re
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import product, ndarray, ufunc
__all__ = ['issubclass_', 'issubsctype', 'issubdtype',
'deprecate', 'deprecate_with_doc', 'get_numarray_include',
'get_include', 'info', 'source', 'who', 'lookfor', 'byte_bounds',
'may_share_memory', 'safe_eval']
def get_include():
    """
    Return the directory that contains the NumPy \\*.h header files.

    Extension modules that need to compile against NumPy should use this
    function to locate the appropriate include directory.

    Notes
    -----
    When using ``distutils``, for example in ``setup.py``.
    ::

        import numpy as np
        ...
        Extension('extension_name', ...
                include_dirs=[np.get_include()])
        ...

    """
    import numpy
    if numpy.show_config is None:
        # numpy is being run from its source tree; headers live in core/.
        base = os.path.dirname(numpy.__file__)
        return os.path.join(base, 'core', 'include')
    # Installed numpy: headers ship next to the compiled core package.
    import numpy.core as core
    return os.path.join(os.path.dirname(core.__file__), 'include')
def get_numarray_include(type=None):
    """
    Return the directory that contains the numarray \\*.h header files.

    Extension modules that need to compile against numarray should use this
    function to locate the appropriate include directory.

    Parameters
    ----------
    type : any, optional
        If `type` is not None, the location of the NumPy headers is returned
        as well.

    Returns
    -------
    dirs : str or list of str
        If `type` is None, `dirs` is a string containing the path to the
        numarray headers.
        If `type` is not None, `dirs` is a list of strings with first the
        path(s) to the numarray headers, followed by the path to the NumPy
        headers.

    Notes
    -----
    Useful when using ``distutils``, for example in ``setup.py``.
    ::

        import numpy as np
        ...
        Extension('extension_name', ...
                include_dirs=[np.get_numarray_include()])
        ...

    """
    from numpy.numarray import get_numarray_include_dirs
    dirs = get_numarray_include_dirs()
    # With no `type`, only the first numarray include dir is wanted;
    # otherwise append the NumPy include dir as well.
    if type is None:
        return dirs[0]
    return dirs + [get_include()]
if sys.version_info < (2, 4):
# Can't set __name__ in 2.3
import new
def _set_function_name(func, name):
func = new.function(func.func_code, func.func_globals,
name, func.func_defaults, func.func_closure)
return func
else:
def _set_function_name(func, name):
func.__name__ = name
return func
class _Deprecate(object):
    """
    Decorator class to deprecate old functions.

    Refer to `deprecate` for details.

    See Also
    --------
    deprecate
    """
    def __init__(self, old_name=None, new_name=None, message=None):
        self.old_name = old_name
        self.new_name = new_name
        self.message = message

    def __call__(self, func, *args, **kwargs):
        """
        Decorator call.  Refer to ``decorate``.
        """
        import warnings
        old_name = self.old_name
        if old_name is None:
            # Python 2 exposes func_name; fall back to __name__ otherwise.
            try:
                old_name = func.func_name
            except AttributeError:
                old_name = func.__name__
        if self.new_name is None:
            depdoc = "`%s` is deprecated!" % old_name
        else:
            depdoc = ("`%s` is deprecated, use `%s` instead!"
                      % (old_name, self.new_name))
        if self.message is not None:
            depdoc = depdoc + "\n" + self.message

        def newfunc(*args, **kwds):
            """`arrayrange` is deprecated, use `arange` instead!"""
            warnings.warn(depdoc, DeprecationWarning)
            return func(*args, **kwds)

        newfunc = _set_function_name(newfunc, old_name)
        # Prepend the deprecation notice to the wrapped docstring.
        if func.__doc__ is None:
            newfunc.__doc__ = depdoc
        else:
            newfunc.__doc__ = '\n\n'.join([depdoc, func.__doc__])
        # Carry over any function attributes (e.g. custom metadata).
        try:
            extra = func.__dict__
        except AttributeError:
            pass
        else:
            newfunc.__dict__.update(extra)
        return newfunc
def deprecate(*args, **kwargs):
    """
    Issues a DeprecationWarning, adds warning to `old_name`'s
    docstring, rebinds ``old_name.__name__`` and returns the new
    function object.

    This function may also be used as a decorator.

    Parameters
    ----------
    func : function
        The function to be deprecated.
    old_name : str, optional
        The name of the function to be deprecated. Default is None, in which
        case the name of `func` is used.
    new_name : str, optional
        The new name for the function. Default is None, in which case
        the deprecation message is that `old_name` is deprecated. If given,
        the deprecation message is that `old_name` is deprecated and `new_name`
        should be used instead.
    message : str, optional
        Additional explanation of the deprecation.  Displayed in the
        docstring after the warning.

    Returns
    -------
    old_func : function
        The deprecated function.

    Examples
    --------
    Note that ``olduint`` returns a value after printing Deprecation Warning:

    >>> olduint = np.deprecate(np.uint)
    >>> olduint(6)
    /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
    DeprecationWarning: uint32 is deprecated
      warnings.warn(str1, DeprecationWarning)
    6

    """
    # Used either directly as ``deprecate(func, ...)`` or as a decorator
    # factory ``@deprecate(...)``; positional args decide which.
    if not args:
        return _Deprecate(*args, **kwargs)
    fn, args = args[0], args[1:]
    # backward compatibility -- these keyword spellings can be removed
    # after the next release
    if 'newname' in kwargs:
        kwargs['new_name'] = kwargs.pop('newname')
    if 'oldname' in kwargs:
        kwargs['old_name'] = kwargs.pop('oldname')
    return _Deprecate(*args, **kwargs)(fn)
# Convenience factory: ``deprecate_with_doc(msg)`` returns a deprecation
# decorator whose extra explanation *msg* is appended to the warning text
# and the wrapped function's docstring.
deprecate_with_doc = lambda msg: _Deprecate(message=msg)
#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------
def byte_bounds(a):
    """
    Returns pointers to the end-points of an array.

    Parameters
    ----------
    a : ndarray
        Input array. It must conform to the Python-side of the array
        interface.

    Returns
    -------
    (low, high) : tuple of 2 integers
        The first integer is the first byte of the array, the second
        integer is just past the last byte of the array.  If `a` is not
        contiguous it will not use every byte between the (`low`, `high`)
        values.

    Examples
    --------
    >>> I = np.eye(2, dtype='f'); I.dtype
    dtype('float32')
    >>> low, high = np.byte_bounds(I)
    >>> high - low == I.size*I.itemsize
    True

    """
    interface = a.__array_interface__
    base = interface['data'][0]
    strides = interface['strides']
    shape = interface['shape']
    # typestr looks like '<f8'; bytes per element follow the first two chars.
    itemsize = int(interface['typestr'][2:])

    low = high = base
    if strides is None:
        # Contiguous: one dense span of a.size elements.
        high += a.size * itemsize
    else:
        # A negative stride extends the low end of the span, a positive
        # one extends the high end.
        for extent, stride in zip(shape, strides):
            if stride < 0:
                low += (extent - 1) * stride
            else:
                high += (extent - 1) * stride
        high += itemsize
    return low, high
def may_share_memory(a, b):
    """
    Determine if two arrays can share memory.

    The memory-bounds of `a` and `b` are computed.  If they overlap then
    this function returns True.  Otherwise, it returns False.

    A return of True does not necessarily mean that the two arrays
    share any element.  It just means that they *might*.

    Parameters
    ----------
    a, b : ndarray

    Returns
    -------
    out : bool

    Examples
    --------
    >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
    False

    """
    a_lo, a_hi = byte_bounds(a)
    b_lo, b_hi = byte_bounds(b)
    # The byte intervals are disjoint iff one ends at or before the other
    # begins; overlap is the negation of that.
    return not (b_lo >= a_hi or a_lo >= b_hi)
#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------
def who(vardict=None):
    """
    Print the Numpy arrays in the given dictionary.

    If there is no dictionary passed in or `vardict` is None then returns
    Numpy arrays in the globals() dictionary (all Numpy arrays in the
    namespace).

    Parameters
    ----------
    vardict : dict, optional
        A dictionary possibly containing ndarrays.  Default is globals().

    Returns
    -------
    out : None
        Returns 'None'.

    Notes
    -----
    Prints out the name, shape, bytes and type of all of the ndarrays present
    in `vardict`.  Aliases of the same array (same ``id``) are shown with the
    original name in parentheses and not counted twice in the byte total.

    Examples
    --------
    >>> a = np.arange(10)
    >>> b = np.ones(20)
    >>> np.who()
    Name Shape Bytes Type
    ===========================================================
    a 10 40 int32
    b 20 160 float64
    Upper bound on total bytes = 200
    >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
    ... 'idx':5}
    >>> np.who(d)
    Name Shape Bytes Type
    ===========================================================
    y 3 24 float64
    x 2 16 float64
    Upper bound on total bytes = 40
    """
    if vardict is None:
        # Default to the *caller's* global namespace, one frame up.
        frame = sys._getframe().f_back
        vardict = frame.f_globals
    sta = []
    cache = {}
    for name in vardict.keys():
        if isinstance(vardict[name],ndarray):
            var = vardict[name]
            idv = id(var)
            # An id seen before means this name aliases an earlier array.
            if idv in cache.keys():
                namestr = name + " (%s)" % cache[idv]
                original=0
            else:
                cache[idv] = name
                namestr = name
                original=1
            shapestr = " x ".join(map(str, var.shape))
            bytestr = str(var.nbytes)
            # Each row: [display name, shape, bytes, dtype, is-original flag].
            sta.append([namestr, shapestr, bytestr, var.dtype.name,
                        original])
    # Compute column widths and the total byte count (originals only).
    maxname = 0
    maxshape = 0
    maxbyte = 0
    totalbytes = 0
    for k in range(len(sta)):
        val = sta[k]
        if maxname < len(val[0]):
            maxname = len(val[0])
        if maxshape < len(val[1]):
            maxshape = len(val[1])
        if maxbyte < len(val[2]):
            maxbyte = len(val[2])
        if val[4]:
            totalbytes += int(val[2])
    if len(sta) > 0:
        # Column widths are at least 10 characters each.
        sp1 = max(10,maxname)
        sp2 = max(10,maxshape)
        sp3 = max(10,maxbyte)
        prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
        print prval + "\n" + "="*(len(prval)+5) + "\n"
    for k in range(len(sta)):
        val = sta[k]
        print "%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
                                        val[1], ' '*(sp2-len(val[1])+5),
                                        val[2], ' '*(sp3-len(val[2])+5),
                                        val[3])
    print "\nUpper bound on total bytes = %d" % totalbytes
    return
#-----------------------------------------------------------------------------
# NOTE: pydoc defines a help function which works simliarly to this
# except it uses a pager to take over the screen.
# combine name and arguments and split to multiple lines of
# width characters. End lines on a comma and begin argument list
# indented with the rest of the arguments.
def _split_line(name, arguments, width):
firstwidth = len(name)
k = firstwidth
newstr = name
sepstr = ", "
arglist = arguments.split(sepstr)
for argument in arglist:
if k == firstwidth:
addstr = ""
else:
addstr = sepstr
k = k + len(argument) + len(addstr)
if k > width:
k = firstwidth + 1 + len(argument)
newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
else:
newstr = newstr + addstr + argument
return newstr
# Lazily-populated module caches used by `info` for string lookups:
# _namedict maps module names to their namespace dicts, _dictlist lists
# the module names in discovery order.  Both are filled by _makenamedict().
_namedict = None
_dictlist = None
# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
module = __import__(module, globals(), locals(), [])
thedict = {module.__name__:module.__dict__}
dictlist = [module.__name__]
totraverse = [module.__dict__]
while 1:
if len(totraverse) == 0:
break
thisdict = totraverse.pop(0)
for x in thisdict.keys():
if isinstance(thisdict[x],types.ModuleType):
modname = thisdict[x].__name__
if modname not in dictlist:
moddict = thisdict[x].__dict__
dictlist.append(modname)
totraverse.append(moddict)
thedict[modname] = moddict
return thedict, dictlist
def info(object=None,maxwidth=76,output=sys.stdout,toplevel='numpy'):
    """
    Get help information for a function, class, or module.

    Parameters
    ----------
    object : object or str, optional
        Input object or name to get information about. If `object` is a
        numpy object, its docstring is given. If it is a string, available
        modules are searched for matching objects.
        If None, information about `info` itself is returned.
    maxwidth : int, optional
        Printing width.
    output : file like object, optional
        File like object that the output is written to, default is ``stdout``.
        The object has to be opened in 'w' or 'a' mode.
    toplevel : str, optional
        Start search at this level.

    See Also
    --------
    source, lookfor

    Notes
    -----
    When used interactively with an object, ``np.info(obj)`` is equivalent to
    ``help(obj)`` on the Python prompt or ``obj?`` on the IPython prompt.

    Examples
    --------
    >>> np.info(np.polyval) # doctest: +SKIP
    polyval(p, x)
    Evaluate the polynomial p at x.
    ...

    When using a string for `object` it is possible to get multiple results.

    >>> np.info('fft') # doctest: +SKIP
    *** Found in numpy ***
    Core FFT routines
    ...
    *** Found in numpy.fft ***
    fft(a, n=None, axis=-1)
    ...
    *** Repeat reference found in numpy.fft.fftpack ***
    *** Total of 3 references found. ***
    """
    global _namedict, _dictlist
    # Local import to speed up numpy's import time.
    import pydoc, inspect
    # Unwrap lazily-imported (ppimport) proxy objects to the real target.
    if hasattr(object,'_ppimport_importer') or \
       hasattr(object, '_ppimport_module'):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr
    if object is None:
        # No argument: document `info` itself.
        info(info)
    elif isinstance(object, ndarray):
        import numpy.numarray as nn
        nn.info(object, output=output, numpy=1)
    elif isinstance(object, str):
        # String lookup: search every cached module namespace for an
        # attribute with this name and document each distinct hit.
        if _namedict is None:
            _namedict, _dictlist = _makenamedict(toplevel)
        numfound = 0
        objlist = []
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if id(obj) in objlist:
                    print >> output, "\n *** Repeat reference found in %s *** " % namestr
                else:
                    objlist.append(id(obj))
                    print >> output, " *** Found in %s ***" % namestr
                    info(obj)
                    print >> output, "-"*maxwidth
                numfound += 1
            except KeyError:
                pass
        if numfound == 0:
            print >> output, "Help for %s not found." % object
        else:
            print >> output, "\n *** Total of %d references found. ***" % numfound
    elif inspect.isfunction(object):
        # Plain function: print its (possibly wrapped) signature and doc.
        name = object.func_name
        arguments = inspect.formatargspec(*inspect.getargspec(object))
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print >> output, " " + argstr + "\n"
        print >> output, inspect.getdoc(object)
    elif inspect.isclass(object):
        # Class: show the __init__ signature (sans self), class or
        # __init__ docstring, and a one-line summary of public methods.
        name = object.__name__
        arguments = "()"
        try:
            if hasattr(object, '__init__'):
                arguments = inspect.formatargspec(*inspect.getargspec(object.__init__.im_func))
                arglist = arguments.split(', ')
                if len(arglist) > 1:
                    # Drop the leading `self` argument from the display.
                    arglist[1] = "("+arglist[1]
                    arguments = ", ".join(arglist[1:])
        except:
            pass
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print >> output, " " + argstr + "\n"
        doc1 = inspect.getdoc(object)
        if doc1 is None:
            if hasattr(object,'__init__'):
                print >> output, inspect.getdoc(object.__init__)
        else:
            print >> output, inspect.getdoc(object)
        methods = pydoc.allmethods(object)
        if methods != []:
            print >> output, "\n\nMethods:\n"
            for meth in methods:
                if meth[0] == '_':
                    continue
                thisobj = getattr(object, meth, None)
                if thisobj is not None:
                    methstr, other = pydoc.splitdoc(inspect.getdoc(thisobj) or "None")
                print >> output, " %s -- %s" % (meth, methstr)
    elif (sys.version_info[0] < 3
            and isinstance(object, types.InstanceType)):
        # check for __call__ method
        # types.InstanceType is the type of the instances of oldstyle classes
        print >> output, "Instance of class: ", object.__class__.__name__
        print >> output
        if hasattr(object, '__call__'):
            arguments = inspect.formatargspec(*inspect.getargspec(object.__call__.im_func))
            arglist = arguments.split(', ')
            if len(arglist) > 1:
                arglist[1] = "("+arglist[1]
                arguments = ", ".join(arglist[1:])
            else:
                arguments = "()"
            if hasattr(object,'name'):
                name = "%s" % object.name
            else:
                name = "<name>"
            if len(name+arguments) > maxwidth:
                argstr = _split_line(name, arguments, maxwidth)
            else:
                argstr = name + arguments
            print >> output, " " + argstr + "\n"
            doc = inspect.getdoc(object.__call__)
            if doc is not None:
                print >> output, inspect.getdoc(object.__call__)
            print >> output, inspect.getdoc(object)
        else:
            print >> output, inspect.getdoc(object)
    elif inspect.ismethod(object):
        # Bound/unbound method: signature without the leading self.
        name = object.__name__
        arguments = inspect.formatargspec(*inspect.getargspec(object.im_func))
        arglist = arguments.split(', ')
        if len(arglist) > 1:
            arglist[1] = "("+arglist[1]
            arguments = ", ".join(arglist[1:])
        else:
            arguments = "()"
        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments
        print >> output, " " + argstr + "\n"
        print >> output, inspect.getdoc(object)
    elif hasattr(object, '__doc__'):
        # Fallback: anything with a docstring at all.
        print >> output, inspect.getdoc(object)
def source(object, output=sys.stdout):
    """
    Print or write to a file the source code for a Numpy object.

    The source code is only returned for objects written in Python. Many
    functions and classes are defined in C and will therefore not return
    useful information.

    Parameters
    ----------
    object : numpy object
        Input object. This can be any object (function, class, module, ...).
    output : file object, optional
        If `output` not supplied then source code is printed to screen
        (sys.stdout).  File object must be created with either write 'w' or
        append 'a' modes.

    See Also
    --------
    lookfor, info

    Examples
    --------
    >>> np.source(np.interp)                        #doctest: +SKIP
    In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
    def interp(x, xp, fp, left=None, right=None):
        \"\"\".... (full docstring printed)\"\"\"
        if isinstance(x, (float, int, number)):
            return compiled_interp([x], xp, fp, left, right).item()
        else:
            return compiled_interp(x, xp, fp, left, right)

    The source code is only returned for objects written in Python.

    >>> np.source(np.array)                         #doctest: +SKIP
    Not available for this object.
    """
    # Local import to speed up numpy's import time.
    import inspect
    try:
        print >> output, "In file: %s\n" % inspect.getsourcefile(object)
        print >> output, inspect.getsource(object)
    except:
        # inspect raises for builtins/C-implemented objects; report rather
        # than propagate.
        print >> output, "Not available for this object."
# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}
# Regexp whose match indicates that a docstring line may contain a function
# signature (so the next line is used as the short description instead).
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
def lookfor(what, module=None, import_modules=True, regenerate=False,
            output=None):
    """
    Do a keyword search on docstrings.

    A list of of objects that matched the search is displayed,
    sorted by relevance. All given keywords need to be found in the
    docstring for it to be returned as a result, but the order does
    not matter.

    Parameters
    ----------
    what : str
        String containing words to look for.
    module : str or list, optional
        Name of module(s) whose docstrings to go through.
    import_modules : bool, optional
        Whether to import sub-modules in packages. Default is True.
    regenerate : bool, optional
        Whether to re-generate the docstring cache. Default is False.
    output : file-like, optional
        File-like object to write the output to. If omitted, use a pager.

    See Also
    --------
    source, info

    Notes
    -----
    Relevance is determined only roughly, by checking if the keywords occur
    in the function name, at the start of a docstring, etc.

    Examples
    --------
    >>> np.lookfor('binary representation')
    Search results for 'binary representation'
    ------------------------------------------
    numpy.binary_repr
    Return the binary representation of the input number as a string.
    numpy.core.setup_common.long_double_representation
    Given a binary dump as given by GNU od -b, look for long double
    numpy.base_repr
    Return a string representation of a number in the given base system.
    ...
    """
    import pydoc
    # Cache
    cache = _lookfor_generate_cache(module, import_modules, regenerate)
    # Search
    # XXX: maybe using a real stemming search engine would be better?
    # Keep only entries whose docstring contains *every* keyword.
    found = []
    whats = str(what).lower().split()
    if not whats: return
    for name, (docstring, kind, index) in cache.iteritems():
        if kind in ('module', 'object'):
            # don't show modules or objects
            continue
        ok = True
        doc = docstring.lower()
        for w in whats:
            if w not in doc:
                ok = False
                break
        if ok:
            found.append(name)
    # Relevance sort
    # XXX: this is full Harrison-Stetson heuristics now,
    # XXX: it probably could be improved
    kind_relevance = {'func': 1000, 'class': 1000,
                      'module': -1000, 'object': -1000}
    def relevance(name, docstr, kind, index):
        # Heuristic score; higher is more relevant.
        r = 0
        # do the keywords occur within the start of the docstring?
        first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
        r += sum([200 for w in whats if w in first_doc])
        # do the keywords occur in the function name?
        r += sum([30 for w in whats if w in name])
        # is the full name long?
        r += -len(name) * 5
        # is the object of bad type?
        r += kind_relevance.get(kind, -1000)
        # is the object deep in namespace hierarchy?
        r += -name.count('.') * 10
        r += max(-index / 100, -100)
        return r
    def relevance_value(a):
        return relevance(a, *cache[a])
    found.sort(key=relevance_value)
    # Pretty-print, most relevant entry first (found is ascending).
    s = "Search results for '%s'" % (' '.join(whats))
    help_text = [s, "-"*len(s)]
    for name in found[::-1]:
        doc, kind, ix = cache[name]
        doclines = [line.strip() for line in doc.strip().split("\n")
                    if line.strip()]
        # find a suitable short description
        try:
            first_doc = doclines[0].strip()
            if _function_signature_re.search(first_doc):
                first_doc = doclines[1].strip()
        except IndexError:
            first_doc = ""
        help_text.append("%s\n %s" % (name, first_doc))
    if not found:
        help_text.append("Nothing found.")
    # Output: explicit file, pager for long results, or plain print.
    if output is not None:
        output.write("\n".join(help_text))
    elif len(help_text) > 10:
        pager = pydoc.getpager()
        pager("\n".join(help_text))
    else:
        print "\n".join(help_text)
def _lookfor_generate_cache(module, import_modules, regenerate):
    """
    Generate docstring cache for given module.

    Parameters
    ----------
    module : str, None, module
        Module for which to generate docstring cache
    import_modules : bool
        Whether to import sub-modules in packages.
    regenerate: bool
        Re-generate the docstring cache

    Returns
    -------
    cache : dict {obj_full_name: (docstring, kind, index), ...}
        Docstring cache for the module, either cached one (regenerate=False)
        or newly generated.

    """
    global _lookfor_caches
    # Local import to speed up numpy's import time.
    import inspect
    from cStringIO import StringIO
    if module is None:
        module = "numpy"
    if isinstance(module, str):
        try:
            __import__(module)
        except ImportError:
            return {}
        module = sys.modules[module]
    elif isinstance(module, list) or isinstance(module, tuple):
        # A sequence of modules: merge the individual caches.
        cache = {}
        for mod in module:
            cache.update(_lookfor_generate_cache(mod, import_modules,
                                                 regenerate))
        return cache
    if id(module) in _lookfor_caches and not regenerate:
        return _lookfor_caches[id(module)]
    # walk items and collect docstrings
    cache = {}
    _lookfor_caches[id(module)] = cache
    seen = {}
    index = 0
    # Breadth-first traversal of the module's namespace.
    stack = [(module.__name__, module)]
    while stack:
        name, item = stack.pop(0)
        if id(item) in seen: continue
        seen[id(item)] = True
        index += 1
        kind = "object"
        if inspect.ismodule(item):
            kind = "module"
            try:
                _all = item.__all__
            except AttributeError:
                _all = None
            # import sub-packages
            if import_modules and hasattr(item, '__path__'):
                for pth in item.__path__:
                    for mod_path in os.listdir(pth):
                        this_py = os.path.join(pth, mod_path)
                        init_py = os.path.join(pth, mod_path, '__init__.py')
                        if os.path.isfile(this_py) and mod_path.endswith('.py'):
                            to_import = mod_path[:-3]
                        elif os.path.isfile(init_py):
                            to_import = mod_path
                        else:
                            continue
                        if to_import == '__init__':
                            continue
                        try:
                            # Catch SystemExit, too
                            base_exc = BaseException
                        except NameError:
                            # Python 2.4 doesn't have BaseException
                            base_exc = Exception
                        try:
                            # Silence any import-time output from the
                            # sub-module by swapping stdout/stderr.
                            old_stdout = sys.stdout
                            old_stderr = sys.stderr
                            try:
                                sys.stdout = StringIO()
                                sys.stderr = StringIO()
                                __import__("%s.%s" % (name, to_import))
                            finally:
                                sys.stdout = old_stdout
                                sys.stderr = old_stderr
                        except base_exc:
                            continue
            for n, v in _getmembers(item):
                try:
                    item_name = getattr(v, '__name__', "%s.%s" % (name, n))
                    mod_name = getattr(v, '__module__', None)
                except NameError:
                    # ref. SWIG's global cvars
                    # NameError: Unknown C global variable
                    item_name = "%s.%s" % (name, n)
                    mod_name = None
                if '.' not in item_name and mod_name:
                    item_name = "%s.%s" % (mod_name, item_name)
                if not item_name.startswith(name + '.'):
                    # don't crawl "foreign" objects
                    if isinstance(v, ufunc):
                        # ... unless they are ufuncs
                        pass
                    else:
                        continue
                elif not (inspect.ismodule(v) or _all is None or n in _all):
                    continue
                stack.append(("%s.%s" % (name, n), v))
        elif inspect.isclass(item):
            kind = "class"
            for n, v in _getmembers(item):
                stack.append(("%s.%s" % (name, n), v))
        elif hasattr(item, "__call__"):
            kind = "func"
        try:
            doc = inspect.getdoc(item)
        except NameError: # ref SWIG's NameError: Unknown C global variable
            doc = None
        if doc is not None:
            cache[name] = (doc, kind, index)
    return cache
def _getmembers(item):
import inspect
try:
members = inspect.getmembers(item)
except AttributeError:
members = [(x, getattr(item, x)) for x in dir(item)
if hasattr(item, x)]
return members
#-----------------------------------------------------------------------------
# The following SafeEval class and company are adapted from Michael Spencer's
# ASPN Python Cookbook recipe:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/364469
# Accordingly it is mostly Copyright 2006 by Michael Spencer.
# The recipe, like most of the other ASPN Python Cookbook recipes was made
# available under the Python license.
# http://www.python.org/license
# It has been modified to:
# * handle unary -/+
# * support True/False/None
# * raise SyntaxError instead of a custom exception.
class SafeEval(object):
    """
    Object to evaluate constant string expressions.

    This includes strings with lists, dicts and tuples using the abstract
    syntax tree created by ``compiler.parse`` (Python 2) or ``ast.parse``
    (Python 3).

    For an example of usage, see `safe_eval`.

    See Also
    --------
    safe_eval

    """
    if sys.version_info[0] < 3:
        # Python 2: nodes come from the (deprecated) ``compiler`` package.
        def visit(self, node, **kw):
            cls = node.__class__
            meth = getattr(self,'visit'+cls.__name__,self.default)
            return meth(node, **kw)

        def default(self, node, **kw):
            raise SyntaxError("Unsupported source construct: %s"
                              % node.__class__)

        def visitExpression(self, node, **kw):
            for child in node.getChildNodes():
                return self.visit(child, **kw)

        def visitConst(self, node, **kw):
            return node.value

        def visitDict(self, node,**kw):
            return dict([(self.visit(k),self.visit(v)) for k,v in node.items])

        def visitTuple(self, node, **kw):
            return tuple([self.visit(i) for i in node.nodes])

        def visitList(self, node, **kw):
            return [self.visit(i) for i in node.nodes]

        def visitUnaryAdd(self, node, **kw):
            return +self.visit(node.getChildNodes()[0])

        def visitUnarySub(self, node, **kw):
            return -self.visit(node.getChildNodes()[0])

        def visitName(self, node, **kw):
            if node.name == 'False':
                return False
            elif node.name == 'True':
                return True
            elif node.name == 'None':
                return None
            else:
                raise SyntaxError("Unknown name: %s" % node.name)
    else:
        # Python 3: nodes come from the standard ``ast`` module.
        def visit(self, node):
            cls = node.__class__
            meth = getattr(self, 'visit' + cls.__name__, self.default)
            return meth(node)

        def default(self, node):
            raise SyntaxError("Unsupported source construct: %s"
                              % node.__class__)

        def visitExpression(self, node):
            return self.visit(node.body)

        def visitNum(self, node):
            return node.n

        def visitStr(self, node):
            return node.s

        def visitBytes(self, node):
            return node.s

        def visitDict(self, node,**kw):
            return dict([(self.visit(k), self.visit(v))
                         for k, v in zip(node.keys, node.values)])

        def visitTuple(self, node):
            return tuple([self.visit(i) for i in node.elts])

        def visitList(self, node):
            return [self.visit(i) for i in node.elts]

        def visitUnaryOp(self, node):
            import ast
            if isinstance(node.op, ast.UAdd):
                return +self.visit(node.operand)
            elif isinstance(node.op, ast.USub):
                return -self.visit(node.operand)
            else:
                raise SyntaxError("Unknown unary op: %r" % node.op)

        def visitName(self, node):
            if node.id == 'False':
                return False
            elif node.id == 'True':
                return True
            elif node.id == 'None':
                return None
            else:
                raise SyntaxError("Unknown name: %s" % node.id)

        def visitNameConstant(self, node):
            return node.value

        def visitConstant(self, node):
            # Python 3.8+ parses *all* literals (numbers, strings, bytes,
            # True/False/None) as ``ast.Constant``, so the Num/Str/Bytes/
            # NameConstant handlers above never fire there.  Without this
            # handler, safe_eval raises SyntaxError for every literal on
            # modern interpreters.
            return node.value
def safe_eval(source):
    """
    Protected string evaluation.

    Evaluate a string containing a Python literal expression without
    allowing the execution of arbitrary non-literal code.

    Parameters
    ----------
    source : str
        The string to evaluate.

    Returns
    -------
    obj : object
        The result of evaluating `source`.

    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax, or if it contains non-literal
        code.

    Examples
    --------
    >>> np.safe_eval('1')
    1
    >>> np.safe_eval('[1, 2, 3]')
    [1, 2, 3]
    >>> np.safe_eval('{"foo": ("bar", 10.0)}')
    {'foo': ('bar', 10.0)}
    >>> np.safe_eval('import os')
    Traceback (most recent call last):
    ...
    SyntaxError: invalid syntax
    >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
    Traceback (most recent call last):
    ...
    SyntaxError: Unsupported source construct: compiler.ast.CallFunc
    """
    # Local imports to speed up numpy's import time.
    import warnings
    from numpy.testing.utils import WarningManager
    warn_ctx = WarningManager()
    warn_ctx.__enter__()
    try:
        # compiler package is deprecated for 3.x, which is already solved here
        # (the ``ast`` module is used as a drop-in replacement below).
        warnings.simplefilter('ignore', DeprecationWarning)
        try:
            import compiler
        except ImportError:
            import ast as compiler
    finally:
        warn_ctx.__exit__()
    walker = SafeEval()
    # NOTE: the ``except E, err`` form below is Python 2-only syntax; both
    # handlers simply re-raise so callers see the original SyntaxError.
    try:
        ast = compiler.parse(source, mode="eval")
    except SyntaxError, err:
        raise
    try:
        return walker.visit(ast)
    except SyntaxError, err:
        raise
#-----------------------------------------------------------------------------
| gpl-3.0 |
Bionetbook/bionetbook | bnbapp/compare/urls.py | 2 | 1143 | from django.conf.urls.defaults import patterns, url, include
from compare import views
# from protocols import views
# URL routes for the protocol-comparison app.  The ``*_ajax`` endpoints are
# fetched asynchronously and render either one protocol layout (single),
# two side by side (view/ajax), or editable clones of either.
# NOTE(review): the route name 'layout_sinlge_view' looks like a typo of
# 'layout_single_view', but templates may reverse() it by this exact name,
# so it is left unchanged.
urlpatterns = patterns("",
    url(regex=r'^select/$', view=views.CompareSelectView.as_view(), name='compare_select'),
    url(regex=r'^view/single_ajax/(?P<protocol_a_slug>[-\w]+)/$', view=views.LayoutSingleView.as_view(), name='layout_sinlge_view'),
    url(regex=r'^clone/single_ajax/(?P<protocol_a_slug>[-\w]+)/$', view=views.CloneLayoutSingleView.as_view(), name='clone_layout_single_view'),
    url(regex=r'^view/ajax/(?P<protocol_a_slug>[-\w]+)/(?P<protocol_b_slug>[-\w]+)/$', view=views.CompareDisplayView.as_view(), name='compare_display_view'),
    url(regex=r'^view/clone_ajax/(?P<protocol_a_slug>[-\w]+)/(?P<protocol_b_slug>[-\w]+)/$', view=views.CloneDisplayView.as_view(), name='clone_display_view'),
    # url(regex=r'^view/clone_ajax/(?P<protocol_a_slug>[-\w]+)/(?P<protocol_b_slug>[-\w]+)/$', view=views.CloneDisplayView.as_view(), name='clone_display_view'),
    # url(regex=r'^view/ajax/(?P<protocol_a_slug>[-\w]+)/(?P<protocol_b_slug>[-\w]+)/$', view=views.AjaxView.as_view(), name='ajax_view'),
)
| mit |
JRock007/boxxy | dist/Boxxy server.app/Contents/Resources/lib/python2.7/numpy/lib/arrayterator.py | 52 | 7282 | """
A buffered iterator for big arrays.
This module solves the problem of iterating over a big file-based array
without having to read it into memory. The `Arrayterator` class wraps
an array object, and when iterated it will return sub-arrays with at most
a user-specified number of elements.
"""
from __future__ import division, absolute_import, print_function
from operator import mul
from functools import reduce
from numpy.compat import long
__all__ = ['Arrayterator']
class Arrayterator(object):
    """
    Buffered iterator for big arrays.
    `Arrayterator` creates a buffered iterator for reading big arrays in small
    contiguous blocks. The class is useful for objects stored in the
    file system. It allows iteration over the object *without* reading
    everything in memory; instead, small blocks are read and iterated over.
    `Arrayterator` can be used with any object that supports multidimensional
    slices. This includes NumPy arrays, but also variables from
    Scientific.IO.NetCDF or pynetcdf for example.
    Parameters
    ----------
    var : array_like
        The object to iterate over.
    buf_size : int, optional
        The buffer size. If `buf_size` is supplied, the maximum amount of
        data that will be read into memory is `buf_size` elements.
        Default is None, which will read as many element as possible
        into memory.
    Attributes
    ----------
    var
    buf_size
    start
    stop
    step
    shape
    flat
    See Also
    --------
    ndenumerate : Multidimensional array iterator.
    flatiter : Flat array iterator.
    memmap : Create a memory-map to an array stored in a binary file on disk.
    Notes
    -----
    The algorithm works by first finding a "running dimension", along which
    the blocks will be extracted. Given an array of dimensions
    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
    first dimension will be used. If, on the other hand,
    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
    Blocks are extracted along this dimension, and when the last block is
    returned the process continues from the next dimension, until all
    elements have been read.
    Examples
    --------
    >>> import numpy as np
    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
    >>> a_itor.shape
    (3, 4, 5, 6)
    Now we can iterate over ``a_itor``, and it will return arrays of size
    two. Since `buf_size` was smaller than any dimension, the first
    dimension will be iterated over first:
    >>> for subarr in a_itor:
    ...     if not subarr.all():
    ...         print subarr, subarr.shape
    ...
    [[[[0 1]]]] (1, 1, 1, 2)
    """
    def __init__(self, var, buf_size=None):
        # ``var`` is the wrapped array-like; ``buf_size`` caps the number of
        # elements materialized per yielded block (None = no cap).
        self.var = var
        self.buf_size = buf_size
        # Per-dimension slice state (start/stop/step) describing the region
        # this iterator covers; narrowed by __getitem__, consumed by __iter__.
        self.start = [0 for dim in var.shape]
        self.stop = [dim for dim in var.shape]
        self.step = [1 for dim in var.shape]
    def __getattr__(self, attr):
        # Delegate any unknown attribute (e.g. dtype) to the wrapped variable.
        return getattr(self.var, attr)
    def __getitem__(self, index):
        """
        Return a new arrayterator restricted to ``index`` (no data is read).
        """
        # Fix index, handling ellipsis and incomplete slices.
        if not isinstance(index, tuple):
            index = (index,)
        fixed = []
        length, dims = len(index), len(self.shape)
        for slice_ in index:
            if slice_ is Ellipsis:
                # Expand the ellipsis into as many full slices as needed.
                fixed.extend([slice(None)] * (dims-length+1))
                length = len(fixed)
            elif isinstance(slice_, (int, long)):
                # Scalar index -> length-1 slice (``long`` is Python 2 only).
                fixed.append(slice(slice_, slice_+1, 1))
            else:
                fixed.append(slice_)
        index = tuple(fixed)
        if len(index) < dims:
            index += (slice(None),) * (dims-len(index))
        # Return a new arrayterator object. The requested slice is composed
        # with this iterator's existing start/stop/step bookkeeping instead
        # of actually slicing the underlying data.
        out = self.__class__(self.var, self.buf_size)
        for i, (start, stop, step, slice_) in enumerate(
            zip(self.start, self.stop, self.step, index)):
            out.start[i] = start + (slice_.start or 0)
            out.step[i] = step * (slice_.step or 1)
            out.stop[i] = start + (slice_.stop or stop-start)
            out.stop[i] = min(stop, out.stop[i])
        return out
    def __array__(self):
        """
        Return corresponding data (reads the whole selected region at once).
        """
        slice_ = tuple(slice(*t) for t in zip(
            self.start, self.stop, self.step))
        return self.var[slice_]
    @property
    def flat(self):
        """
        A 1-D flat iterator for Arrayterator objects.
        This iterator returns elements of the array to be iterated over in
        `Arrayterator` one by one. It is similar to `flatiter`.
        See Also
        --------
        `Arrayterator`
        flatiter
        Examples
        --------
        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        >>> a_itor = np.lib.arrayterator.Arrayterator(a, 2)
        >>> for subarr in a_itor.flat:
        ...     if not subarr:
        ...         print subarr, type(subarr)
        ...
        0 <type 'numpy.int32'>
        """
        for block in self:
            for value in block.flat:
                yield value
    @property
    def shape(self):
        """
        The shape of the array to be iterated over.
        For an example, see `Arrayterator`.
        """
        # Number of selected indices per dimension, like len(range(start, stop, step)).
        return tuple(((stop-start-1)//step+1) for start, stop, step in
                     zip(self.start, self.stop, self.step))
    def __iter__(self):
        # Skip arrays with degenerate dimensions
        if [dim for dim in self.shape if dim <= 0]:
            # NOTE(review): ``raise StopIteration`` inside a generator becomes
            # RuntimeError under PEP 479 (Python 3.7+); fine on the Python 2
            # this file targets.
            raise StopIteration
        start = self.start[:]
        stop = self.stop[:]
        step = self.step[:]
        ndims = len(self.var.shape)
        while True:
            # ``reduce`` (a Python 2 builtin) and ``mul`` come from the
            # enclosing module, outside this excerpt.
            count = self.buf_size or reduce(mul, self.shape)
            # iterate over each dimension, looking for the
            # running dimension (ie, the dimension along which
            # the blocks will be built from)
            rundim = 0
            for i in range(ndims-1, -1, -1):
                # if count is zero we ran out of elements to read
                # along higher dimensions, so we read only a single position
                if count == 0:
                    stop[i] = start[i]+1
                elif count <= self.shape[i]:
                    # limit along this dimension
                    stop[i] = start[i] + count*step[i]
                    rundim = i
                else:
                    # read everything along this dimension
                    stop[i] = self.stop[i]
                stop[i] = min(self.stop[i], stop[i])
                count = count//self.shape[i]
            # yield a block
            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
            yield self.var[slice_]
            # Update start position, taking care of overflow to
            # other dimensions
            start[rundim] = stop[rundim]  # start where we stopped
            for i in range(ndims-1, 0, -1):
                if start[i] >= self.stop[i]:
                    start[i] = self.start[i]
                    start[i-1] += self.step[i-1]
            if start[0] >= self.stop[0]:
                raise StopIteration
| mit |
nop33/indico | indico/modules/rb/notifications/reservations.py | 2 | 7927 | from flask import render_template
from indico.core.notifications import email_sender, make_email
from indico.util.date_time import format_datetime
from indico.util.string import to_unicode
class ReservationNotification(object):
    """Assembles the various notification e-mails sent for one reservation.

    Each ``compose_email_to_*`` helper returns a mail object built by
    ``make_email`` or None when that audience should not be notified.
    """

    def __init__(self, reservation):
        self.reservation = reservation
        self.start_dt = format_datetime(reservation.start_dt)

    def _get_email_subject(self, **mail_params):
        # Build "<prefix>[<room>] <subject> <date> <suffix>"; strip() removes
        # the stray whitespace left behind when optional parts are empty.
        fields = {
            'prefix': to_unicode(mail_params.get('subject_prefix', '')),
            'room': self.reservation.room.full_name,
            'subject': to_unicode(mail_params.get('subject', '')),
            'date': to_unicode(self.start_dt),
            'suffix': to_unicode(mail_params.get('subject_suffix', '')),
        }
        return u'{prefix}[{room}] {subject} {date} {suffix}'.format(**fields).strip()

    def _make_body(self, mail_params, **body_params):
        # Imported lazily to avoid a circular import at module load time.
        from indico.modules.rb.models.reservations import RepeatFrequency, RepeatMapping
        context = dict(mail_params, **body_params)
        context['RepeatFrequency'] = RepeatFrequency
        context['RepeatMapping'] = RepeatMapping
        template = 'rb/emails/reservations/{}.txt'.format(mail_params['template_name'])
        return render_template(template, **context)

    def compose_email_to_user(self, **mail_params):
        # The creator always gets a copy; the contact address too, if set.
        recipients = {self.reservation.created_by_user.email}
        if self.reservation.contact_email:
            recipients.add(self.reservation.contact_email)
        subject = self._get_email_subject(**mail_params)
        body = self._make_body(mail_params, reservation=self.reservation)
        return make_email(to_list=recipients, subject=subject, body=body)

    def compose_email_to_manager(self, **mail_params):
        room = self.reservation.room
        recipients = {room.owner.email} | room.manager_emails | room.notification_emails
        subject = self._get_email_subject(**mail_params)
        body = self._make_body(mail_params, reservation=self.reservation)
        return make_email(to_list=recipients, subject=subject, body=body)

    def compose_email_to_vc_support(self, **mail_params):
        from indico.modules.rb import rb_settings
        # Only accepted bookings that actually use videoconferencing are
        # relevant to VC support; otherwise nothing is sent.
        if not (self.reservation.is_accepted and self.reservation.uses_vc):
            return None
        recipients = rb_settings.get('vc_support_emails')
        if not recipients:
            return None
        subject = self._get_email_subject(**mail_params)
        body = self._make_body(mail_params, reservation=self.reservation)
        return make_email(to_list=recipients, subject=subject, body=body)

    def compose_email_to_assistance(self, **mail_params):
        from indico.modules.rb import rb_settings
        room_wants_notification = self.reservation.room.notification_for_assistance
        needs_mail = self.reservation.needs_assistance or mail_params.get('assistance_cancelled')
        if not (room_wants_notification and needs_mail):
            return None
        recipients = rb_settings.get('assistance_emails')
        if not recipients:
            return None
        subject = self._get_email_subject(**mail_params)
        body = self._make_body(mail_params, reservation=self.reservation)
        return make_email(to_list=recipients, subject=subject, body=body)
@email_sender
def notify_cancellation(reservation):
    """Build the e-mails announcing a cancelled booking."""
    if not reservation.is_cancelled:
        raise ValueError('Reservation is not cancelled')
    notification = ReservationNotification(reservation)
    planned = [
        (notification.compose_email_to_user,
         dict(subject='Booking cancelled on',
              template_name='cancellation_email_to_user')),
        (notification.compose_email_to_manager,
         dict(subject='Booking cancelled on',
              template_name='cancellation_email_to_manager')),
        (notification.compose_email_to_vc_support,
         dict(subject='Booking cancelled on',
              template_name='cancellation_email_to_vc_support')),
        (notification.compose_email_to_assistance,
         dict(subject_prefix='[Support Request Cancellation]',
              subject='Request cancelled for',
              template_name='cancellation_email_to_assistance')),
    ]
    # compose_* helpers return None for audiences that should not be
    # notified; drop those entries.
    return [email for email in (compose(**params) for compose, params in planned) if email]
@email_sender
def notify_confirmation(reservation):
    """Build the e-mails announcing a confirmed booking."""
    if not reservation.is_accepted:
        raise ValueError('Reservation is not confirmed')
    notification = ReservationNotification(reservation)
    planned = [
        (notification.compose_email_to_user,
         dict(subject='Booking confirmed on',
              template_name='confirmation_email_to_user')),
        (notification.compose_email_to_manager,
         dict(subject='Booking confirmed on',
              template_name='confirmation_email_to_manager')),
        (notification.compose_email_to_vc_support,
         dict(subject='New Booking on',
              template_name='creation_email_to_vc_support')),
        (notification.compose_email_to_assistance,
         dict(subject_prefix='[Support Request]',
              subject='New Support on',
              template_name='creation_email_to_assistance')),
    ]
    # Drop the None entries returned for non-applicable audiences.
    return [email for email in (compose(**params) for compose, params in planned) if email]
@email_sender
def notify_creation(reservation):
    """Build the e-mails announcing a newly created booking/pre-booking."""
    accepted = reservation.is_accepted
    notification = ReservationNotification(reservation)
    planned = [
        (notification.compose_email_to_user,
         dict(subject='New Booking on' if accepted else 'Pre-Booking awaiting acceptance',
              template_name='creation_email_to_user' if accepted else 'creation_pre_email_to_user')),
        (notification.compose_email_to_manager,
         dict(subject='New booking on' if accepted else 'New Pre-Booking on',
              template_name='creation_email_to_manager' if accepted else 'creation_pre_email_to_manager')),
        (notification.compose_email_to_vc_support,
         dict(subject='New Booking on',
              template_name='creation_email_to_vc_support')),
        (notification.compose_email_to_assistance,
         dict(subject_prefix='[Support Request]',
              subject='New Booking on',
              template_name='creation_email_to_assistance')),
    ]
    # Drop the None entries returned for non-applicable audiences.
    return [email for email in (compose(**params) for compose, params in planned) if email]
@email_sender
def notify_rejection(reservation):
    """Build the e-mails announcing a rejected booking."""
    if not reservation.is_rejected:
        raise ValueError('Reservation is not rejected')
    notification = ReservationNotification(reservation)
    planned = [
        (notification.compose_email_to_user,
         dict(subject='Booking rejected on',
              template_name='rejection_email_to_user')),
        (notification.compose_email_to_manager,
         dict(subject='Booking rejected on',
              template_name='rejection_email_to_manager')),
        (notification.compose_email_to_assistance,
         dict(subject_prefix='[Support Request Cancellation]',
              subject='Request cancelled for',
              template_name='rejection_email_to_assistance')),
    ]
    # Drop the None entries returned for non-applicable audiences.
    return [email for email in (compose(**params) for compose, params in planned) if email]
@email_sender
def notify_modification(reservation, changes):
    """Build the e-mails announcing that a booking was modified.

    ``changes`` maps field names to ``{'old': ..., 'new': ...}`` dicts; a
    removed assistance request triggers a cancellation-flavoured mail to
    the assistance team.
    """
    assistance_change = changes.get('needs_assistance')
    assistance_cancelled = assistance_change and assistance_change['old'] and not assistance_change['new']
    notification = ReservationNotification(reservation)
    planned = [
        (notification.compose_email_to_user,
         dict(subject='Booking modified on',
              template_name='modification_email_to_user')),
        (notification.compose_email_to_manager,
         dict(subject='Booking modified on',
              template_name='modification_email_to_manager')),
        (notification.compose_email_to_vc_support,
         dict(subject='Booking modified on',
              template_name='modification_email_to_vc_support')),
        (notification.compose_email_to_assistance,
         dict(subject_prefix='[Support Request {}]'.format('Cancelled' if assistance_cancelled else 'Modification'),
              subject='Modified request on',
              template_name='modification_email_to_assistance',
              assistance_cancelled=assistance_cancelled)),
    ]
    # Drop the None entries returned for non-applicable audiences.
    return [email for email in (compose(**params) for compose, params in planned) if email]
| gpl-3.0 |
maftieu/CouchPotatoServer | couchpotato/core/downloaders/nzbvortex/main.py | 4 | 5838 | from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
from couchpotato.core.helpers.encoding import tryUrlencode, sp
from couchpotato.core.helpers.variable import cleanHost
from couchpotato.core.logger import CPLog
from urllib2 import URLError
from uuid import uuid4
import hashlib
import httplib
import json
import os
import socket
import ssl
import sys
import time
import traceback
import urllib2
log = CPLog(__name__)
class NZBVortex(Downloader):
protocol = ['nzb']
api_level = None
session_id = None
def download(self, data = None, media = None, filedata = None):
if not media: media = {}
if not data: data = {}
# Send the nzb
try:
nzb_filename = self.createFileName(data, filedata, media)
self.call('nzb/add', files = {'file': (nzb_filename, filedata)})
time.sleep(10)
raw_statuses = self.call('nzb')
nzb_id = [nzb['id'] for nzb in raw_statuses.get('nzbs', []) if os.path.basename(item['nzbFileName']) == nzb_filename][0]
return self.downloadReturnId(nzb_id)
except:
log.error('Something went wrong sending the NZB file: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self, ids):
raw_statuses = self.call('nzb')
release_downloads = ReleaseDownloadList(self)
for nzb in raw_statuses.get('nzbs', []):
if nzb['id'] in ids:
# Check status
status = 'busy'
if nzb['state'] == 20:
status = 'completed'
elif nzb['state'] in [21, 22, 24]:
status = 'failed'
release_downloads.append({
'id': nzb['id'],
'name': nzb['uiTitle'],
'status': status,
'original_status': nzb['state'],
'timeleft':-1,
'folder': sp(nzb['destinationPath']),
})
return release_downloads
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
try:
self.call('nzb/%s/cancel' % release_download['id'])
except:
log.error('Failed deleting: %s', traceback.format_exc(0))
return False
return True
def login(self):
nonce = self.call('auth/nonce', auth = False).get('authNonce')
cnonce = uuid4().hex
hashed = b64encode(hashlib.sha256('%s:%s:%s' % (nonce, cnonce, self.conf('api_key'))).digest())
params = {
'nonce': nonce,
'cnonce': cnonce,
'hash': hashed
}
login_data = self.call('auth/login', parameters = params, auth = False)
# Save for later
if login_data.get('loginResult') == 'successful':
self.session_id = login_data.get('sessionID')
return True
log.error('Login failed, please check you api-key')
return False
def call(self, call, parameters = None, repeat = False, auth = True, *args, **kwargs):
# Login first
if not parameters: parameters = {}
if not self.session_id and auth:
self.login()
# Always add session id to request
if self.session_id:
parameters['sessionid'] = self.session_id
params = tryUrlencode(parameters)
url = cleanHost(self.conf('host'), ssl = self.conf('ssl')) + 'api/' + call
try:
data = self.urlopen('%s?%s' % (url, params), *args, **kwargs)
if data:
return json.loads(data)
except URLError, e:
if hasattr(e, 'code') and e.code == 403:
# Try login and do again
if not repeat:
self.login()
return self.call(call, parameters = parameters, repeat = True, **kwargs)
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
except:
log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc()))
return {}
def getApiLevel(self):
if not self.api_level:
url = cleanHost(self.conf('host')) + 'api/app/apilevel'
try:
data = self.urlopen(url, show_error = False)
self.api_level = float(json.loads(data).get('apilevel'))
except URLError, e:
if hasattr(e, 'code') and e.code == 403:
log.error('This version of NZBVortex isn\'t supported. Please update to 2.8.6 or higher')
else:
log.error('NZBVortex doesn\'t seem to be running or maybe the remote option isn\'t enabled yet: %s', traceback.format_exc(1))
return self.api_level
def isEnabled(self, manual = False, data = None):
if not data: data = {}
return super(NZBVortex, self).isEnabled(manual, data) and self.getApiLevel()
class HTTPSConnection(httplib.HTTPSConnection):
    # HTTPSConnection variant that wraps the socket with an explicit TLSv1
    # handshake (ssl.PROTOCOL_TLSv1) instead of the default negotiation.

    def __init__(self, *args, **kwargs):
        httplib.HTTPSConnection.__init__(self, *args, **kwargs)

    def connect(self):
        sock = socket.create_connection((self.host, self.port), self.timeout)

        # ``_tunnel_host`` only exists unconditionally from Python 2.6.7 on;
        # older versions need the hasattr() guard before setting up a proxy
        # CONNECT tunnel.
        if sys.version_info < (2, 6, 7):
            if hasattr(self, '_tunnel_host'):
                self.sock = sock
                self._tunnel()
        else:
            if self._tunnel_host:
                self.sock = sock
                self._tunnel()

        # Force the TLSv1 protocol for the actual handshake.
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version = ssl.PROTOCOL_TLSv1)
class HTTPSHandler(urllib2.HTTPSHandler):
    # urllib2 handler routing HTTPS requests through the TLSv1-forcing
    # HTTPSConnection defined above.

    def https_open(self, req):
        return self.do_open(HTTPSConnection, req)
jjscarafia/CUPS-Cloud-Print | testing/test_ccputils.py | 2 | 6577 | # CUPS Cloudprint - Print via Google Cloud Print
# Copyright (C) 2011 Simon Cadman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import sys
import pytest
import struct
sys.path.insert(0, ".")
from ccputils import Utils
from cupshelper import CUPSHelper
def teardown_function(function):
    # Reset the logging module between tests: close the handlers opened by
    # Utils.SetupLogging, then re-import logging so each test starts with a
    # fresh configuration.  (``reload`` is the Python 2 builtin.)
    logging.shutdown()
    reload(logging)
def test_SetupLogging():
    # Logging to an explicit path should create the file on first write.
    testLogFile = '/tmp/testccp.log'
    assert os.path.exists(testLogFile) is False
    assert Utils.SetupLogging(testLogFile) is True
    logging.error('test_setupLogging error test')
    assert os.path.exists(testLogFile) is True
    os.unlink(testLogFile)


def test_SetupLoggingDefault():
    # With no argument, SetupLogging falls back to Utils.logpath.
    testLogFile = '/tmp/testccp.log'
    assert os.path.exists(testLogFile) is False
    Utils.logpath = testLogFile
    assert Utils.SetupLogging() is True
    logging.error('test_setupLogging error test')
    assert os.path.exists(testLogFile) is True
    os.unlink(testLogFile)


def test_SetupLoggingFails():
    # A path inside a missing directory must fail without creating anything.
    testLogFile = '/tmp/dirthatdoesntexist/testccp.log'
    assert os.path.exists(testLogFile) is False
    assert Utils.SetupLogging(testLogFile) is False
    assert os.path.exists(testLogFile) is False


def test_fileIsPDFFails():
    # Plain text content is not detected as PDF.
    assert Utils.fileIsPDF(open('testing/testfiles/NotPdf.txt').read()) is False


def test_fileIsPDFSucceeds():
    # The first 128 bytes are enough to detect the PDF magic header.
    assert Utils.fileIsPDF(open('testing/testfiles/Test Page.pdf').read(128)) is True


def test_fileIsPDFErrors():
    # Arbitrary short strings are rejected rather than raising.
    assert Utils.fileIsPDF("testdata") is False


def test_whichFails():
    # Unknown executables resolve to None.
    assert Utils.which('dsaph9oaghd9ahdsadsadsadsadasd') is None


def test_whichSucceeds():
    # bash lives in one of a few well-known locations across distros.
    assert Utils.which(
        'bash') in (
        '/bin/bash',
        '/usr/bin/bash',
        '/usr/sbin/bash')


def test_isExeSucceeds():
    # The shell binary location varies by distro; check whichever exists.
    if os.path.exists('/usr/bin/sh'):
        assert Utils.is_exe("/usr/bin/sh") is True
    else:
        assert Utils.is_exe("/bin/sh") is True


def test_isExeFails():
    # /dev/null exists but is not executable.
    assert Utils.is_exe("/dev/null") is False
def test_getLPID():
    # The default lookup must yield a positive numeric print-group id.
    assert int(Utils.GetLPID()) > 0
    assert Utils.GetLPID() is not None

    # Pick whichever print group actually exists on this host.
    import grp
    workingPrintGroupName = 'lp'
    try:
        grp.getgrnam(workingPrintGroupName)
    except:
        workingPrintGroupName = 'cups'
        pass

    # Both names broken -> no id; broken default + valid alternative -> id.
    assert Utils.GetLPID('brokendefault', 'brokenalternative', False) is None
    assert int(
        Utils.GetLPID(
            'brokendefault',
            workingPrintGroupName,
            False)) > 0
    assert Utils.GetLPID(
        'brokendefault',
        workingPrintGroupName,
        False) is not None

    # test blacklist works
    assert Utils.GetLPID(
        workingPrintGroupName,
        'brokenalternative',
        True,
        [workingPrintGroupName,
         'brokendefault',
         'adm',
         'wheel',
         'root'],
        True) is None


def test_showVersion():
    # Without the 'version' CLI argument nothing happens; with it the
    # helper prints the version and exits.
    assert Utils.ShowVersion("12345") is False
    sys.argv = ['testfile', 'version']
    with pytest.raises(SystemExit):
        Utils.ShowVersion("12345")


def test_readFile():
    # Round-trip via WriteFile, and None for a missing path.
    Utils.WriteFile('/tmp/testfile', 'data')
    assert Utils.ReadFile('/tmp/testfile') == 'data'
    assert Utils.ReadFile('/tmp/filethatdoesntexist') is None
    os.unlink('/tmp/testfile')
def test_writeFile():
    """WriteFile succeeds for a writable path and fails when the target
    directory does not exist."""
    # Bugfix: the original evaluated these ``... is True/False`` comparisons
    # as bare expressions without ``assert``, so the test could never fail.
    assert Utils.WriteFile('/tmp/testfile', 'data') is True
    assert Utils.WriteFile('/tmp/testfile/dsadsaasd', 'data') is False
    os.unlink('/tmp/testfile')
def test_base64encode():
    # PDF content gets a PDF data-URI; any other format falls back to
    # application/octet-stream.
    assert Utils.Base64Encode('data', 'pdf') == 'data:application/pdf;base64,ZGF0YQ=='
    assert Utils.Base64Encode('data', 'other') == 'data:application/octet-stream;base64,ZGF0YQ=='
    assert Utils.Base64Encode(
        'data', 'something') == 'data:application/octet-stream;base64,ZGF0YQ=='


def test_GetLanguage():
    # Locale strings resolve to (language, locale); missing values default
    # to English.
    assert Utils.GetLanguage(['en_GB', ]) == ("en", "en_GB")
    assert Utils.GetLanguage(['en_US', ]) == ("en", "en_US")
    assert Utils.GetLanguage(['fr_CA', ]) == ("fr", "fr_CA")
    assert Utils.GetLanguage(['fr_FR', ]) == ("fr", "fr_FR")
    assert Utils.GetLanguage(['it_IT', ]) == ("it", "it_IT")
    assert Utils.GetLanguage(['en', ]) == ("en", "en")
    assert Utils.GetLanguage([None, None]) == ("en", "en")

    # also test with helper: a CUPS DefaultLanguage setting overrides the
    # supplied locale list entirely.
    helper = CUPSHelper()
    if helper.getServerSetting('DefaultLanguage') is not None:
        # if set
        locale = helper.getServerSetting('DefaultLanguage')
        lang = locale.split('_')[0]
        assert Utils.GetLanguage([None, None], helper) == (lang, locale)
        assert Utils.GetLanguage(['fr_CA', ], helper) == (lang, locale)
    else:
        # if not set
        assert Utils.GetLanguage([None, None], helper) == ("en", "en")
        assert Utils.GetLanguage(['fr_CA', ], helper) == ("fr", "fr_CA")


def test_GetDefaultPaperType():
    # US locales get Letter; most others (and the unknown/empty case is
    # Letter) get A4; the lookup is case-insensitive.
    assert Utils.GetDefaultPaperType('en_GB') == "A4"
    assert Utils.GetDefaultPaperType('en_US') == "Letter"
    assert Utils.GetDefaultPaperType('en_gb') == "A4"
    assert Utils.GetDefaultPaperType('en_us') == "Letter"
    assert Utils.GetDefaultPaperType('de_DE') == "A4"
    assert Utils.GetDefaultPaperType('es_MX') == "Letter"
    assert Utils.GetDefaultPaperType('') == "Letter"


def test_GetWindowSize():
    # expect this to fail gracefully if no tty
    assert Utils.GetWindowSize() is None

    # pass in dummy winsize struct (rows, cols, xpixel, ypixel)
    dummywinsize = struct.pack('HHHH', 800, 600, 0, 0)
    assert Utils.GetWindowSize(dummywinsize) == (800, 600)

    # ensure window size of 0x0 returns none
    dummywinsize = struct.pack('HHHH', 0, 0, 0, 0)
    assert Utils.GetWindowSize(dummywinsize) is None


def test_StdInToTempFile():
    # Data fed on (mock) stdin must end up in the returned temp file.
    testdata = 'oux3ooquoideereeng5A'
    mockstdin = open('/tmp/stdintest', 'wb')
    mockstdin.write(testdata)
    mockstdin.close()

    mockstdin = open('/tmp/stdintest', 'r')
    tempfile = Utils.StdInToTempFile(1, 'testuser', stdin=mockstdin)
    assert Utils.ReadFile(tempfile) == testdata
    mockstdin.close()

    os.unlink('/tmp/stdintest')
    os.unlink(tempfile)
| gpl-3.0 |
farzadghanei/distutilazy | distutilazy/test.py | 1 | 8421 | """
distutilazy.test
-----------------
command classes to help run tests
:license: MIT. For more details see LICENSE file or
https://opensource.org/licenses/MIT
"""
from __future__ import absolute_import
import os
from os.path import abspath, basename, dirname
import sys
import fnmatch
from importlib import import_module
import unittest
from distutils.core import Command
from types import ModuleType
__version__ = "0.4.0"
def test_suite_for_modules(modules):
suite = unittest.TestSuite()
test_loader = unittest.defaultTestLoader
for module in modules:
module_tests = test_loader.loadTestsFromModule(module)
suite.addTests(module_tests)
return suite
def find_source_filename(source_name, dir_path):
    """Find the filename matching the source/module name
    in the specified path. For example searching for "queue"
    might return "queue.py" or "queue.pyc".

    :param source_name: module name without extension
    :param dir_path: directory to search in
    :return: full path of the first existing match (".py" is preferred
             over ".pyc"), or None when neither exists
    """
    # Bugfix: the original built the second candidate with "pyc" (missing
    # the leading dot), so "<name>.pyc" was never actually matched.
    source_filenames = [
        os.path.join(dir_path, source_name + ext) for
        ext in (".py", ".pyc")]
    for source_filename in source_filenames:
        if os.path.exists(source_filename):
            return source_filename
    return None
class RunTests(Command):
    """Distutils command that discovers, imports and runs the test suite."""

    description = """Run test suite"""
    user_options = [("root=", "r", "path to tests suite dir"),
                    ("pattern=", "p", "test file name pattern"),
                    ("verbosity=", "v", "verbosity level [1,2,3]"),
                    ("files=", None,
                     "run specified test files (comma separated)"),
                    ("except-import-errors", None,
                     "except import errors when trying to import test "
                     "modules. Note: might shadow import errors raised "
                     "by the actual modules being tested")]

    def initialize_options(self):
        """Set the defaults for all user options."""
        self.root = os.path.join(os.getcwd(), 'tests')
        self.pattern = "test*.py"
        self.verbosity = 1
        self.files = None
        self.except_import_errors = False

    def finalize_options(self):
        """Validate and normalize the user options."""
        if not os.path.exists(self.root):
            raise IOError("Failed to access root path '{}'".format(self.root))
        # Clamp verbosity into the 1..3 range accepted by TextTestRunner.
        verbosity = min(int(self.verbosity), 3)
        if verbosity < 1:
            self.verbosity = 1
        else:
            self.verbosity = verbosity
        if self.files:
            # Use a list comprehension (not a lazy ``map``) so the value
            # survives being iterated more than once on Python 3.
            self.files = [name.strip() for name in self.files.split(',')]
        self.except_import_errors = bool(self.except_import_errors)

    def get_modules_from_files(self, files):
        """Import and return the modules behind the given test file names."""
        modules = []
        for file_name in files:
            directory = dirname(file_name)
            module_name, _, extension = basename(file_name).rpartition('.')
            if not module_name:
                # Bugfix: ``.format()`` used to bind to the wrong literal
                # because of ``+`` concatenation, logging a literal '{}'.
                self.announce(
                    "failed to find module name from filename '{}'. "
                    "skipping this file".format(file_name))
                continue
            package_name = self._import_dir_as_package(directory)
            if package_name:
                module_name = '.' + module_name
            elif directory not in sys.path:
                sys.path.insert(0, directory)
            self.announce(
                "importing module '{}' from file '{}' ...".format(module_name,
                                                                  file_name))
            module = import_module(module_name, package=package_name)
            modules.append(module)
        return modules

    def _import_dir_as_package(self, directory):
        """Tries to import the specified directory path as a package, if it
        seems to be a package. Returns the package name (if import was
        successful) or None if directory is not a valid package."""
        directory_name = basename(directory)
        abs_dir = abspath(directory)
        package_name = None
        if directory_name and find_source_filename('__init__', abs_dir) is not None:
            parent_dir = dirname(abs_dir)
            if parent_dir not in sys.path:
                sys.path.insert(0, parent_dir)
            try:
                self.announce(
                    "importing '{}' as package ...".format(directory_name))
                import_module(directory_name)
                package_name = directory_name
            except ImportError as err:
                self.announce(
                    "failed to import '{}'. {}".format(directory_name, err))
                # Only swallow the error when it refers to the package
                # itself; otherwise it comes from code under test.
                if self.except_import_errors and directory_name in str(err):
                    package_name = None
                else:
                    raise err
        return package_name

    def find_test_modules_from_package_path(self, package_path):
        """Import and return modules from package __all__,
        if path is found to be a package.
        """
        package_dir = dirname(package_path)
        package_name = basename(package_path)
        if package_dir:
            sys.path.insert(0, package_dir)
        self.announce(
            "importing package '{}' ...".format(package_name)
        )
        package = import_module(package_name)
        if package and hasattr(package, '__all__'):
            modules = []
            for module_name in package.__all__:
                module = import_module('{}.{}'.format(
                    package_name, module_name))
                # isinstance instead of exact type comparison; accepts
                # ModuleType subclasses too.
                if isinstance(module, ModuleType) \
                        and module not in modules:
                    modules.append(module)
            return modules

    def find_test_modules_from_test_files(self, root, pattern):
        """Return list of test modules from the the files in the path
        whose name match the pattern
        """
        modules = []
        abs_root = abspath(root)
        for (dir_path, directories, file_names) in os.walk(abs_root):
            package_name = self._import_dir_as_package(dir_path)
            if not package_name and dir_path not in sys.path:
                sys.path.insert(0, dir_path)
            for filename in fnmatch.filter(file_names, pattern):
                module_name, _, extension = basename(filename).rpartition('.')
                if not module_name:
                    # Bugfix: merged the two literals so the placeholder is
                    # actually substituted.
                    self.announce(
                        "failed to find module name from filename '{}'. "
                        "skipping this test".format(filename))
                    continue
                module_name_to_import = '.' + module_name if package_name else module_name
                self.announce(
                    "importing module '{}' from '{}' ...".format(
                        module_name_to_import, filename
                    )
                )
                try:
                    module = import_module(module_name_to_import, package_name)
                    if isinstance(module, ModuleType) and module not in modules:
                        modules.append(module)
                except ImportError as err:
                    # Bugfix: merged the literals so '{}' placeholders are
                    # filled instead of logged verbatim.
                    self.announce(
                        "failed to import '{}' from '{}'. {}. "
                        "skipping this file!".format(module_name_to_import, filename, err)
                    )
                    if not self.except_import_errors or module_name not in str(err):
                        raise err
                except (ValueError, SystemError) as err:
                    self.announce(
                        "failed to import '{}' from '{}'. {}. "
                        "skipping this file!".format(module_name, filename, err)
                    )
        return modules

    def get_test_runner(self):
        """Return the unittest runner used to execute the suite."""
        return unittest.TextTestRunner(verbosity=self.verbosity)

    def run(self):
        """Collect the test modules (explicit files, package __all__, or a
        filename-pattern walk, in that order) and run them."""
        if self.files:
            modules = self.get_modules_from_files(self.files)
        else:
            self.announce("searching for test package modules ...")
            modules = self.find_test_modules_from_package_path(self.root)
            if not modules:
                self.announce("searching for test files ...")
                modules = self.find_test_modules_from_test_files(self.root,
                                                                 self.pattern)
        if not modules:
            self.announce("found no test files")
            return False
        suite = test_suite_for_modules(modules)
        runner = self.get_test_runner()
        self.announce("running tests ...")
        runner.run(suite)
runner.run(suite)
run_tests = RunTests
| mit |
CyanogenMod/android_kernel_cyanogen_msm8994 | Documentation/target/tcm_mod_builder.py | 2358 | 40707 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
    # Print the error message and abort the script with a non-zero exit code.
    print msg
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    # Emit <fabric_mod_name>_base.h with the FC-specific nacl/tpg/lport
    # structs, and set the module-level port naming globals to the
    # FC terminology ("lport"/"nport") used by the other generators.
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "	/* Binary World Wide unique Port Name for FC Initiator Nport */\n"
    buf += "	u64 nport_wwpn;\n"
    buf += "	/* ASCII formatted WWPN for FC Initiator Nport */\n"
    buf += "	char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "	struct se_node_acl se_node_acl;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "	/* FC lport target portal group tag for TCM */\n"
    buf += "	u16 lport_tpgt;\n"
    buf += "	/* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += "	struct " + fabric_mod_name + "_lport *lport;\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "	struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += "	/* SCSI protocol the lport is providing */\n"
    buf += "	u8 lport_proto_id;\n"
    buf += "	/* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += "	u64 lport_wwpn;\n"
    buf += "	/* ASCII formatted WWPN for FC Target Lport */\n"
    buf += "	char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += "	struct se_wwn lport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    # NOTE(review): file.write() returns None on Python 2, so this check
    # never fires; a genuine write failure raises IOError instead.
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"

    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    # Emit <fabric_mod_name>_base.h with the SAS-specific nacl/tpg/tport
    # structs, and set the module-level port naming globals to the SAS
    # terminology ("tport"/"iport").
    global fabric_mod_port
    global fabric_mod_init_port
    buf = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION  \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += "	/* Binary World Wide unique Port Name for SAS Initiator port */\n"
    buf += "	u64 iport_wwpn;\n"
    buf += "	/* ASCII formatted WWPN for Sas Initiator port */\n"
    buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += "	struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += "	/* SAS port target portal group tag for TCM */\n"
    buf += "	u16 tport_tpgt;\n"
    buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += "	struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += "	/* SCSI protocol the tport is providing */\n"
    buf += "	u8 tport_proto_id;\n"
    buf += "	/* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += "	u64 tport_wwpn;\n"
    buf += "	/* ASCII formatted WWPN for SAS Target port */\n"
    buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += "	struct se_wwn tport_wwn;\n"
    buf += "};\n"

    ret = p.write(buf)
    # NOTE(review): file.write() returns None on Python 2, so this check
    # never fires; a genuine write failure raises IOError instead.
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"

    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Emit <fabric_mod_name>_base.h with iSCSI-flavoured fabric structs.

    Writes the generated C header (nacl/tpg/tport struct definitions
    keyed off the module name) into fabric_mod_dir_var, then records
    the iSCSI naming convention ("tport"/"iport") in the module-level
    globals consumed by the later generation passes.

    Raises IOError/OSError if the header cannot be written.
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_nacl {\n"
    buf += " /* ASCII formatted InitiatorName */\n"
    buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
    buf += " struct se_node_acl se_node_acl;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* SCSI protocol the tport is providing */\n"
    buf += " u8 tport_proto_id;\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    # Bug fix: the former "if not p" / "if ret" checks were dead code
    # (open() raises instead of returning a falsy handle; write()
    # returns None on Python 2 and a count on Python 3, where the
    # check would falsely report failure).  'with' closes the file
    # even if write() raises.
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the per-protocol _base.h generator.

    Supported proto_ident values are "FC", "SAS" and "iSCSI"; any
    other value prints a diagnostic and aborts with exit status 1.
    """
    dispatch = {
        "FC": tcm_mod_build_FC_include,
        "SAS": tcm_mod_build_SAS_include,
        "iSCSI": tcm_mod_build_iSCSI_include,
    }
    builder = dispatch.get(proto_ident)
    if builder is None:
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    builder(fabric_mod_dir_val, fabric_mod_name)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c.

    Emits the configfs glue for the new fabric module: the
    make/drop callbacks for node ACLs, TPGs and WWN ports, a
    read-only "version" attribute, the target_core_fabric_ops
    callback table, and the module init/exit plus configfs
    (de)registration boilerplate.

    proto_ident ("FC", "SAS" or "iSCSI") selects the FC/SAS-specific
    WWPN handling lines in the generated make_nodeacl()/make_<port>()
    bodies.  Reads the module-level fabric_mod_port /
    fabric_mod_init_port globals set by the _include builders.
    """
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    # NOTE(review): open() raises on failure so "if not p" can never
    # fire; likewise file.write() returns None on Python 2, so the
    # "if ret" check after the write below is dead code.
    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Header includes for the generated C translation unit.
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_fabric_configfs.h>\n"
    buf += "#include <target/target_core_configfs.h>\n"
    buf += "#include <target/configfs_macros.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
    buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"

    # Generated <name>_make_nodeacl(): allocate and register a node ACL.
    buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
    buf += " struct se_portal_group *se_tpg,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
    # Only FC/SAS modules carry a binary WWPN in the generated code.
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n"
    buf += " u32 nexus_depth;\n\n"
    buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n"
    buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
    buf += " if (!se_nacl_new)\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
    buf += " nexus_depth = 1;\n"
    buf += " /*\n"
    buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
    buf += " * when converting a NodeACL from demo mode -> explict\n"
    buf += " */\n"
    buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
    buf += " name, nexus_depth);\n"
    buf += " if (IS_ERR(se_nacl)) {\n"
    buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
    buf += " return se_nacl;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
    buf += " */\n"
    buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return se_nacl;\n"
    buf += "}\n\n"

    # Generated <name>_drop_nodeacl(): tear down a node ACL.
    buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
    buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
    buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
    buf += " kfree(nacl);\n"
    buf += "}\n\n"

    # Generated <name>_make_tpg(): parse "tpgt_N" and register a TPG.
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
    buf += " &tpg->se_tpg, (void *)tpg,\n"
    buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"

    # Generated <name>_drop_tpg(): deregister and free a TPG.
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"

    # Generated <name>_make_<port>(): allocate the WWN port object.
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"

    # Generated <name>_drop_<port>(): free the WWN port object.
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"

    # Generated read-only "version" configfs attribute.
    buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " char *page)\n"
    buf += "{\n"
    buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += "}\n\n"
    buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
    buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
    buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
    buf += " NULL,\n"
    buf += "};\n\n"

    # Generated target_core_fabric_ops table wiring the stubs emitted
    # by tcm_mod_dump_fabric_ops() plus the configfs callbacks above.
    buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
    buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
    buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
    buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
    buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
    buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
    buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
    buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
    buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += " .fabric_post_link = NULL,\n"
    buf += " .fabric_pre_unlink = NULL,\n"
    buf += " .fabric_make_np = NULL,\n"
    buf += " .fabric_drop_np = NULL,\n"
    buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
    buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
    buf += "};\n\n"

    # Generated configfs registration / deregistration helpers.
    buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
    buf += "{\n"
    buf += " struct target_fabric_configfs *fabric;\n"
    buf += " int ret;\n\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
    buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
    buf += " utsname()->machine);\n"
    buf += " /*\n"
    buf += " * Register the top level struct config_item_type with TCM core\n"
    buf += " */\n"
    buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
    buf += " if (IS_ERR(fabric)) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
    buf += " return PTR_ERR(fabric);\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
    buf += " */\n"
    buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
    buf += " /*\n"
    buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
    buf += " */\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
    buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
    buf += " /*\n"
    buf += " * Register the fabric for use within TCM\n"
    buf += " */\n"
    buf += " ret = target_fabric_configfs_register(fabric);\n"
    buf += " if (ret < 0) {\n"
    buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
    buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
    buf += " return ret;\n"
    buf += " }\n"
    buf += " /*\n"
    buf += " * Setup our local pointer to *fabric\n"
    buf += " */\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
    buf += "{\n"
    buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
    buf += " return;\n\n"
    buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
    buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
    buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
    buf += "};\n\n"

    # Generated module init/exit plus MODULE_* boilerplate.
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " int ret;\n\n"
    buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
    buf += " if (ret < 0)\n"
    buf += " return ret;\n\n"
    buf += " return 0;\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " " + fabric_mod_name + "_deregister_configfs();\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"

    # NOTE(review): dead check -- write() returns None on Python 2.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Collect function-pointer declarations from target_core_fabric.h.

    Reads <tcm_dir>/include/target/target_core_fabric.h line by line and
    appends every line containing a function-pointer declarator
    (matched with the '\(\*' pattern) to the module-level fabric_ops
    list, which later drives tcm_mod_dump_fabric_ops().
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;

    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        # While collection has not started, skip the struct header line.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue

        # NOTE(review): process_fo flips to 1 on the first line that is
        # NOT the struct header, so collection effectively starts from
        # the top of the file; the '\(\*' filter below is what actually
        # restricts the capture to function-pointer lines -- confirm
        # this matches the intended "only inside the struct" scan.
        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue

            fabric_ops.append(line.rstrip())
            continue

        # Steady state: read ahead one line, keep it if it declares a
        # function pointer (same logic as the branch above).
        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue

        fabric_ops.append(line.rstrip())
    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_fabric.c and _fabric.h.

    Walks the module-level fabric_ops list (filled in by
    tcm_mod_scan_fabric_ops()) and, for each callback name it
    recognizes via re.search(), appends a stub C implementation to
    buf (the .c file) and a matching prototype to bufi (the .h file).
    proto_ident selects the FC/SAS/iSCSI helper called from the
    transport-ID related stubs.  Reads the fabric_mod_port global set
    by the _include builders.
    """
    buf = ""
    bufi = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f

    # NOTE(review): open() raises on failure, so these "if not p/pi"
    # checks can never fire; same for the "if ret" checks after the
    # writes at the bottom (write() returns None on Python 2).
    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi

    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)

    # Fixed preamble: includes plus the always-emitted check_true /
    # check_false helpers.
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi.h>\n"
    buf += "#include <scsi/scsi_host.h>\n"
    buf += "#include <scsi/scsi_device.h>\n"
    buf += "#include <scsi/scsi_cmnd.h>\n"
    buf += "#include <scsi/libfc.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include <target/target_core_configfs.h>\n\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

    # One pass over the scanned callback declarations; each recognized
    # name appends its stub + prototype.  Note only the
    # 'get_fabric_name' branch short-circuits with continue -- the
    # remaining patterns are tested in sequence for every entry.
    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo
        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name[4:] + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue
        if re.search('get_fabric_proto_ident', fo):
            buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " u8 proto_id;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
                buf += " break;\n"
            buf += " }\n\n"
            buf += " return proto_id;\n"
            buf += "}\n\n"
            bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
        if re.search('get_default_depth', fo):
            buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
        if re.search('get_pr_transport_id\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code,\n"
            buf += " unsigned char *buf)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code, buf);\n"
                buf += " break;\n"
            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *, unsigned char *);\n"
        if re.search('get_pr_transport_id_len\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl,\n"
            buf += " struct t10_pr_registration *pr_reg,\n"
            buf += " int *format_code)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " int ret = 0;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
                buf += " format_code);\n"
                buf += " break;\n"
            buf += " }\n\n"
            buf += " return ret;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
            bufi += " int *);\n"
        if re.search('parse_pr_out_transport_id\)\(', fo):
            buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " const char *buf,\n"
            buf += " u32 *out_tid_len,\n"
            buf += " char **port_nexus_ptr)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
            buf += " char *tid = NULL;\n\n"
            buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
            if proto_ident == "FC":
                buf += " case SCSI_PROTOCOL_FCP:\n"
                buf += " default:\n"
                buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "SAS":
                buf += " case SCSI_PROTOCOL_SAS:\n"
                buf += " default:\n"
                buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            elif proto_ident == "iSCSI":
                buf += " case SCSI_PROTOCOL_ISCSI:\n"
                buf += " default:\n"
                buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
                buf += " port_nexus_ptr);\n"
            buf += " }\n\n"
            buf += " return tid;\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
            bufi += " const char *, u32 *, char **);\n"
        if re.search('alloc_fabric_acl\)\(', fo):
            buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
            buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
            buf += " if (!nacl) {\n"
            buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
            buf += " return NULL;\n"
            buf += " }\n\n"
            buf += " return &nacl->se_node_acl;\n"
            buf += "}\n\n"
            bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
        if re.search('release_fabric_acl\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
            buf += " struct se_portal_group *se_tpg,\n"
            buf += " struct se_node_acl *se_nacl)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
            buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
            buf += " kfree(nacl);\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
            bufi += " struct se_node_acl *);\n"
        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
        if re.search('shutdown_session\)\(', fo):
            buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
        if re.search('close_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
        if re.search('stop_session\)\(', fo):
            buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
        if re.search('fall_back_to_erl0\)\(', fo):
            buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
        if re.search('sess_logged_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
        if re.search('get_task_tag\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
        if re.search('queue_tm_rsp\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
        if re.search('is_state_remove\)\(', fo):
            buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"

    # NOTE(review): dead checks -- write() returns None on Python 2.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)
    p.close()

    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)
    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Emit the kbuild Makefile for the generated fabric module.

    The Makefile links <name>_fabric.o and <name>_configfs.o into
    <name>.o, gated on CONFIG_<NAME>.

    Raises IOError/OSError if the Makefile cannot be written.
    """
    f = fabric_mod_dir_var + "/Makefile"
    print("Writing file: " + f)

    buf = ""
    buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += "   " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

    # Bug fix: open() raises on failure and write() returns None on
    # Python 2, so the former "if not p" / "if ret" branches could
    # never trigger (and "if ret" would misfire under Python 3);
    # 'with' also guarantees the file is closed on error.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Emit the Kconfig entry for the generated fabric module.

    Produces a tristate option CONFIG_<NAME> depending on
    TARGET_CORE && CONFIGFS_FS, defaulting to n.

    Raises IOError/OSError if the Kconfig file cannot be written.
    """
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)

    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

    # Bug fix: the old "if not p" / "if ret" error branches were dead
    # code (open() raises; write() returns None on Python 2).  'with'
    # closes the file even if write() raises.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's obj-$(CONFIG_...) rule to the top-level
    drivers/target/Makefile so kbuild descends into its directory."""
    line = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    kbuild = tcm_dir + "/drivers/target/Makefile"
    with open(kbuild, 'a') as handle:
        handle.write(line)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new module's Kconfig to the top-level
    drivers/target/Kconfig."""
    line = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig = tcm_dir + "/drivers/target/Kconfig"
    with open(kconfig, 'a') as handle:
        handle.write(line)
    return
def main(modname, proto_ident):
    """Drive generation of a new TCM fabric module skeleton.

    Validates proto_ident, creates the module directory under
    drivers/target/, emits the fabric/configfs sources plus Makefile and
    Kconfig, and optionally wires the module into the top-level
    drivers/target/Makefile and Kconfig.
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"

    # Presumably run from a tool directory two levels below the kernel
    # tree root (e.g. Documentation/target/) -- TODO confirm.
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir
    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident

    # Only these three transport protocol identifiers are supported.
    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    # NOTE(review): the prompt string is missing a space before
    # "to drivers/..." (runtime string, left untouched here); `input`
    # also shadows the builtin of the same name.
    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
    return
# Command-line interface; both options are mandatory (optparse itself has
# no notion of required options, hence the manual check below).
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# Reject a missing -m or -p with usage output and a non-zero exit status.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
mesosphere/marathon | tests/shakedown/shakedown/util.py | 2 | 14683 | import collections
import concurrent.futures
import contextlib
import functools
import hashlib
import json
import logging
import os
import platform
import shutil
import stat
import sys
import tempfile
import time
import six
from six.moves import urllib
from . import constants
from .errors import DCOSException
@contextlib.contextmanager
def tempdir():
    """Yield a freshly created temporary directory.

    The directory (and anything created inside it) is removed when the
    with-block exits, whether normally or via an exception.

    :return: reference to a temporary directory
    :rtype: str
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=True)
@contextlib.contextmanager
def temptext(content=None):
    """A context manager for temporary files.

    The lifetime of the returned temporary file corresponds to the
    lexical scope of the returned file descriptor.

    :param content: Content to populate the file with.
    :type content: bytes
    :return: reference to a temporary file
    :rtype: (fd, str)
    """
    fd, path = tempfile.mkstemp()
    if content:
        os.write(fd, content)
    try:
        yield (fd, path)
    finally:
        # Close the file descriptor and ignore errors
        try:
            os.close(fd)
        except OSError:
            pass
        # Bug fix: shutil.rmtree() cannot remove a plain file, so with
        # ignore_errors=True the temporary file used to leak on every
        # call.  os.remove() actually deletes it; errors (e.g. the caller
        # already removed it) are still ignored.
        try:
            os.remove(path)
        except OSError:
            pass
@contextlib.contextmanager
def remove_path_on_error(path):
    """Yield *path* for modification, deleting it (and every subpath)
    if the with-block raises; the exception is then re-raised.

    :rtype: None
    """
    try:
        yield path
    except Exception:
        shutil.rmtree(path, ignore_errors=True)
        raise
@contextlib.contextmanager
def silent_output():
    """A context manager for suppressing stdout / stderr, it sets their file
    descriptors to os.devnull and then restores them.

    This is helpful for inhibiting output from subprocesses, as
    replacing sys.stdout and sys.stderr wouldn't be enough.
    """

    # Original fds stdout/stderr point to. Usually 1 and 2 on POSIX systems.
    original_stdout_fd = sys.stdout.fileno()
    original_stderr_fd = sys.stderr.fileno()

    # Save a copy of original outputs fds
    saved_stdout_fd = os.dup(original_stdout_fd)
    saved_stderr_fd = os.dup(original_stderr_fd)

    # A fd for the null device
    devnull = open(os.devnull, 'w')

    def _redirect_outputs(stdout_to_fd, stderr_to_fd):
        """Redirect stdout/stderr to the given file descriptors."""

        # Flush outputs buffers
        sys.stderr.flush()
        sys.stdout.flush()

        # Make original stdout / stderr fds point to the same file as to_fd
        os.dup2(stdout_to_fd, original_stdout_fd)
        os.dup2(stderr_to_fd, original_stderr_fd)

    try:
        # Redirect outputs to os.devnull
        _redirect_outputs(devnull.fileno(), devnull.fileno())
        yield
    finally:
        # redirect outputs back to the original fds
        # NOTE(review): saved_stdout_fd / saved_stderr_fd are dup()'d but
        # never os.close()'d, leaking two descriptors per use -- confirm
        # and fix separately.
        _redirect_outputs(saved_stdout_fd, saved_stderr_fd)
        devnull.close()
def sh_copy(src, dst):
    """Copy file src to the file or directory dst.

    Any failure is logged and re-raised as a DCOSException carrying the
    most specific message available.

    :param src: source file
    :type src: str
    :param dst: destination file or directory
    :type dst: str
    :rtype: None
    """

    try:
        shutil.copy(src, dst)
    except EnvironmentError as e:
        # EnvironmentError covers both IOError and OSError; prefer the OS
        # error text, qualified by the offending filename when known.
        logger.exception('Unable to copy [%s] to [%s]', src, dst)
        if e.strerror:
            if e.filename:
                raise DCOSException("{}: {}".format(e.strerror, e.filename))
            else:
                raise DCOSException(e.strerror)
        else:
            raise DCOSException(e)
    except Exception as e:
        # Catch-all so every failure mode still surfaces as a DCOSException.
        logger.exception('Unknown error while coping [%s] to [%s]', src, dst)
        raise DCOSException(e)
def sh_move(src, dst):
    """Move file src to the file or directory dst.

    Any failure is logged and re-raised as a DCOSException carrying the
    most specific message available.

    :param src: source file
    :type src: str
    :param dst: destination file or directory
    :type dst: str
    :rtype: None
    """

    try:
        shutil.move(src, dst)
    except EnvironmentError as e:
        # EnvironmentError covers both IOError and OSError; prefer the OS
        # error text, qualified by the offending filename when known.
        logger.exception('Unable to move [%s] to [%s]', src, dst)
        if e.strerror:
            if e.filename:
                raise DCOSException("{}: {}".format(e.strerror, e.filename))
            else:
                raise DCOSException(e.strerror)
        else:
            raise DCOSException(e)
    except Exception as e:
        # Catch-all so every failure mode still surfaces as a DCOSException.
        logger.exception('Unknown error while moving [%s] to [%s]', src, dst)
        raise DCOSException(e)
def ensure_dir_exists(directory):
    """Create *directory* (mode 0o775) unless it already exists.

    :param directory: path to the directory
    :type directory: string
    :rtype: None
    """
    if os.path.exists(directory):
        return
    logger.info('Creating directory: %r', directory)
    try:
        os.makedirs(directory, 0o775)
    except os.error as e:
        raise DCOSException(
            'Cannot create directory [{}]: {}'.format(directory, e))
def ensure_file_exists(path):
    """Create an empty file with 0600 permissions at *path* if missing.

    :param path: path of file to create
    :type path: str
    :rtype: None
    """
    if os.path.exists(path):
        return
    try:
        with open(path, 'w'):
            pass
        os.chmod(path, 0o600)
    except IOError as e:
        raise DCOSException(
            'Cannot create file [{}]: {}'.format(path, e))
def read_file(path):
    """Return the full contents of the file at *path*.

    :param path: path to file
    :type path: str
    :returns: contents of file
    :rtype: str
    """
    if not os.path.isfile(path):
        raise DCOSException('path [{}] is not a file'.format(path))
    with open_file(path) as handle:
        return handle.read()
def enforce_file_permissions(path):
    """Enforce 400 or 600 permissions on file

    Raises DCOSException when *path* is not a file or is readable by
    anyone other than its owner.

    :param path: Path to the TOML file
    :type path: str
    :rtype: None
    """
    if not os.path.isfile(path):
        raise DCOSException('Path [{}] is not a file'.format(path))

    # Unix permissions are incompatible with windows
    # TODO: https://github.com/dcos/dcos-cli/issues/662
    if sys.platform == 'win32':
        return
    else:
        permissions = oct(stat.S_IMODE(os.stat(path).st_mode))
        # Both spellings are listed because oct() renders '0600' on
        # Python 2 and '0o600' on Python 3.
        if permissions not in ['0o600', '0600', '0o400', '0400']:
            if os.path.realpath(path) != path:
                # Name the symlink target so the user fixes the right file.
                path = '%s (pointed to by %s)' % (os.path.realpath(path), path)
            msg = (
                "Permissions '{}' for configuration file '{}' are too open. "
                "File must only be accessible by owner. "
                "Aborting...".format(permissions, path))
            raise DCOSException(msg)
def read_file_secure(path):
    """Read a file, first verifying it has 400 or 600 permissions.

    :param path: path to file
    :type path: str
    :returns: contents of file
    :rtype: str
    """
    enforce_file_permissions(path)
    with open_file(path) as handle:
        return handle.read()
def which(program):
    """Locate the named executable, mimicking the shell's ``which``.

    A *program* containing a path separator is checked directly;
    otherwise each entry of the PATH environment variable is searched.
    On Windows the lookup is retried with an ``.exe`` suffix.

    :param program: The program to locate:
    :type program: str
    :rtype: str
    """
    def _is_executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(program)
    if directory:
        if _is_executable(program):
            return program
    elif constants.PATH_ENV in os.environ:
        for entry in os.environ[constants.PATH_ENV].split(os.pathsep):
            candidate = os.path.join(entry.strip('"'), program)
            if _is_executable(candidate):
                return candidate

    if is_windows_platform() and not program.endswith('.exe'):
        return which(program + '.exe')

    return None
def dcos_bin_path():
    """Return the directory holding the currently running executable.

    :returns: the real path to the DCOS path
    :rtype: str
    """
    executable = os.path.realpath(sys.argv[0])
    return os.path.dirname(executable)
def configure_debug(is_debug):
    """Enable HTTP wire-level debug output when *is_debug* is true;
    otherwise leave logging untouched.

    :param is_debug: Enable debug message if true; otherwise disable debug
                     messages
    :type is_debug: bool
    :rtype: None
    """
    if not is_debug:
        return
    six.moves.http_client.HTTPConnection.debuglevel = 1
def load_json(reader, keep_order=False):
    """Deserialize JSON read from *reader* into a Python object.

    :param reader: the json reader
    :type reader: a :code:`.read()`-supporting object
    :param keep_order: whether the return should be an ordered dictionary
    :type keep_order: bool
    :returns: the deserialized JSON object
    :rtype: dict | list | str | int | float | bool
    """
    # object_pairs_hook=None is json.load's default, so a single call
    # covers both cases.
    hook = collections.OrderedDict if keep_order else None
    try:
        return json.load(reader, object_pairs_hook=hook)
    except Exception as error:
        logger.error(
            'Unhandled exception while loading JSON: %r',
            error)
        raise DCOSException('Error loading JSON: {}'.format(error))
def list_to_err(errs):
    """Join error strings into a single blank-line separated message.

    :param errs: list of string errors
    :type errs: [str]
    :returns: error message
    :rtype: str
    """
    return '\n\n'.join(errs)
def parse_int(string):
    """Convert *string* to an int, raising DCOSException on bad input.

    :param string: string to parse as an integer
    :type string: str
    :returns: the interger value of the string
    :rtype: int
    """
    try:
        return int(string)
    except ValueError:
        logger.error(
            'Unhandled exception while parsing string as int: %r', string)
        raise DCOSException('Error parsing string as int')
def parse_float(string):
    """Convert *string* to a float, raising DCOSException on bad input.

    :param string: string to parse as an float
    :type string: str
    :returns: the float value of the string
    :rtype: float
    """
    try:
        return float(string)
    except ValueError:
        logger.error(
            'Unhandled exception while parsing string as float: %r', string)
        raise DCOSException('Error parsing string as float')
def is_windows_platform():
    """Report whether the interpreter is running on Windows.

    :returns: True is program is running on Windows platform, False
              in other case
    :rtype: boolean
    """
    return platform.system() == "Windows"
def duration(fn):
    """Decorator that debug-logs the wall-clock duration of each call.

    :param fn: function to measure
    :type fn: function
    :returns: wrapper function
    :rtype: function
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        started = time.time()
        try:
            return fn(*args, **kwargs)
        finally:
            # Logged even when fn raises, so failures are timed too.
            logger.debug("duration: {0}.{1}: {2:2.2f}s".format(
                fn.__module__,
                fn.__name__,
                time.time() - started))
    return wrapper
def humanize_bytes(b):
    """Render a byte count with a binary-unit suffix (B/kB/MB/GB).

    :param b: number of bytes
    :type b: number
    :returns: human representation of a number of bytes
    :rtype: str
    """
    thresholds = (
        (1 << 30, 'GB'),
        (1 << 20, 'MB'),
        (1 << 10, 'kB'),
        (1, 'B'),
    )
    for factor, suffix in thresholds:
        if b >= factor:
            break
    return "{0:.2f} {1}".format(b / float(factor), suffix)
@contextlib.contextmanager
def open_file(path, *args):
    """Context manager that opens a file, and raises a DCOSException if
    it fails.

    :param path: file path
    :type path: str
    :param *args: other arguments to pass to `open`
    :type *args: [str]
    :returns: a context manager
    :rtype: context manager
    """
    try:
        file_ = open(path, *args)
    except IOError as e:
        logger.exception('Unable to open file: %s', path)
        raise io_exception(path, e.errno)
    try:
        yield file_
    finally:
        # Bug fix: close() was previously skipped whenever the with-body
        # raised, leaking the file handle (and a body IOError was even
        # mis-reported as an "Error opening file").  Only open() failures
        # are translated to DCOSException now; the handle always closes.
        file_.close()
def io_exception(path, errno):
    """Build the DCOSException reported when *path* cannot be opened.

    :param path: file path
    :type path: str
    :param errno: IO error number
    :type errno: int
    :returns: DCOSException
    :rtype: DCOSException
    """
    message = 'Error opening file [{}]: {}'.format(path, os.strerror(errno))
    return DCOSException(message)
STREAM_CONCURRENCY = 20


def stream(fn, objs):
    """Run *fn* over *objs* on a thread pool of STREAM_CONCURRENCY
    workers, yielding ``(future, obj)`` pairs in completion order.

    :param fn: function
    :type fn: function
    :param objs: objs
    :type objs: objs
    :returns: iterator over (Future, typeof(obj))
    :rtype: iterator over (Future, typeof(obj))
    """
    with concurrent.futures.ThreadPoolExecutor(STREAM_CONCURRENCY) as pool:
        pending = {pool.submit(fn, obj): obj for obj in objs}
        for completed in concurrent.futures.as_completed(pending):
            yield completed, pending[completed]
def get_ssh_options(config_file, options):
    """Build the SSH argument string used by commands that wrap SSH.

    :param config_file: SSH config file.
    :type config_file: str | None
    :param options: SSH options
    :type options: [str]
    :rtype: str
    """
    parts = ['-o {}'.format(opt) for opt in options]
    joined = ' '.join(parts)
    if config_file:
        joined += ' -F {}'.format(config_file)
    # Trailing space so the result can be concatenated directly with the
    # rest of an ssh command line.
    if joined:
        joined += ' '
    return joined
def normalize_marathon_id_path(id_path):
    """Normalizes a Marathon "ID path", such as an app ID, group ID, or pod ID.

    A normalized path has exactly one leading forward slash, no trailing
    forward slashes, and all URL-unsafe characters percent-escaped.

    :param id_path
    :type id_path: str
    :returns: normalized path
    :rtype: str
    """
    trimmed = id_path.strip('/')
    return urllib.parse.quote('/' + trimmed)
# Module-level logger.  Defined mid-file but bound at import time, so the
# functions above can safely reference it by the time they are called.
logger = logging.getLogger(__name__)
def md5_hash_file(file):
    """Return the hex MD5 digest of *file*, rewinding it afterwards.

    :param file: file to hash, file pointer
                 must be at the beginning of the file.
    :type file: file
    :returns: digest in hexadecimal
    :rtype: str
    """
    digest = hashlib.md5()
    while True:
        chunk = file.read(4096)
        if chunk == b'':
            break
        digest.update(chunk)
    file.seek(0)
    return digest.hexdigest()
def read_file_json(path):
    """Load the JSON options stored at *path*; ``None`` yields ``{}``.

    :param path: file path
    :type path: None | str
    :returns: options
    :rtype: dict
    """
    if path is None:
        return {}
    # Expand ~ in the path
    expanded = os.path.expanduser(path)
    with open_file(expanded) as options_file:
        return load_json(options_file)
| apache-2.0 |
fyfcauc/android_external_chromium-org | third_party/closure_linter/closure_linter/fixjsstyle_test.py | 135 | 7371 | #!/usr/bin/env python
#
# Copyright 2008 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Medium tests for the gpylint auto-fixer."""
__author__ = 'robbyw@google.com (Robby Walker)'
import StringIO
import gflags as flags
import unittest as googletest
from closure_linter import checker
from closure_linter import error_fixer
# Root of the golden/input .js files consumed by testFixJsStyle.
_RESOURCE_PREFIX = 'closure_linter/testdata'

# Global gflags configuration shared by every test in this module.
flags.FLAGS.strict = True
flags.FLAGS.limited_doc_files = ('dummy.js', 'externs.js')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
class FixJsStyleTest(googletest.TestCase):
    """Test case for gjslint auto-fixing."""

    def testFixJsStyle(self):
        # End-to-end check: run the fixer over each input file and compare
        # the output byte-for-byte with the corresponding golden file.
        test_cases = [['fixjsstyle.in.js', 'fixjsstyle.out.js'],
                      ['indentation.js', 'fixjsstyle.indentation.out.js']]
        for [running_input_file, running_output_file] in test_cases:
            input_filename = None
            golden_filename = None
            current_filename = None
            # NOTE(review): this try block only formats strings, which
            # cannot raise IOError -- the handler looks unreachable.
            try:
                input_filename = '%s/%s' % (_RESOURCE_PREFIX, running_input_file)
                current_filename = input_filename
                golden_filename = '%s/%s' % (_RESOURCE_PREFIX, running_output_file)
                current_filename = golden_filename
            except IOError, ex:
                raise IOError('Could not find testdata resource for %s: %s' %
                              (current_filename, ex))
            if running_input_file == 'fixjsstyle.in.js':
                with open(input_filename) as f:
                    for line in f:
                        # Go to last line.
                        pass
                self.assertTrue(line == line.rstrip(), '%s file should not end '
                                'with a new line.' % (input_filename))
            # Autofix the file, sending output to a fake file.
            actual = StringIO.StringIO()
            style_checker = checker.JavaScriptStyleChecker(
                error_fixer.ErrorFixer(actual))
            style_checker.Check(input_filename)
            # Now compare the files.
            actual.seek(0)
            expected = open(golden_filename, 'r')
            self.assertEqual(actual.readlines(), expected.readlines())

    def testMissingExtraAndUnsortedRequires(self):
        """Tests handling of missing extra and unsorted goog.require statements."""
        original = [
            "goog.require('dummy.aa');",
            "goog.require('dummy.Cc');",
            "goog.require('dummy.Dd');",
            "",
            "var x = new dummy.Bb();",
            "dummy.Cc.someMethod();",
            "dummy.aa.someMethod();",
        ]
        expected = [
            "goog.require('dummy.Bb');",
            "goog.require('dummy.Cc');",
            "goog.require('dummy.aa');",
            "",
            "var x = new dummy.Bb();",
            "dummy.Cc.someMethod();",
            "dummy.aa.someMethod();",
        ]
        self._AssertFixes(original, expected)

    def testMissingExtraAndUnsortedProvides(self):
        """Tests handling of missing extra and unsorted goog.provide statements."""
        original = [
            "goog.provide('dummy.aa');",
            "goog.provide('dummy.Cc');",
            "goog.provide('dummy.Dd');",
            "",
            "dummy.Cc = function() {};",
            "dummy.Bb = function() {};",
            "dummy.aa.someMethod = function();",
        ]
        expected = [
            "goog.provide('dummy.Bb');",
            "goog.provide('dummy.Cc');",
            "goog.provide('dummy.aa');",
            "",
            "dummy.Cc = function() {};",
            "dummy.Bb = function() {};",
            "dummy.aa.someMethod = function();",
        ]
        self._AssertFixes(original, expected)

    def testNoRequires(self):
        """Tests positioning of missing requires without existing requires."""
        original = [
            "goog.provide('dummy.Something');",
            "",
            "dummy.Something = function() {};",
            "",
            "var x = new dummy.Bb();",
        ]
        expected = [
            "goog.provide('dummy.Something');",
            "",
            "goog.require('dummy.Bb');",
            "",
            "dummy.Something = function() {};",
            "",
            "var x = new dummy.Bb();",
        ]
        self._AssertFixes(original, expected)

    def testNoProvides(self):
        """Tests positioning of missing provides without existing provides."""
        original = [
            "goog.require('dummy.Bb');",
            "",
            "dummy.Something = function() {};",
            "",
            "var x = new dummy.Bb();",
        ]
        expected = [
            "goog.provide('dummy.Something');",
            "",
            "goog.require('dummy.Bb');",
            "",
            "dummy.Something = function() {};",
            "",
            "var x = new dummy.Bb();",
        ]
        self._AssertFixes(original, expected)

    def testGoogScopeIndentation(self):
        """Tests Handling a typical end-of-scope indentation fix."""
        original = [
            'goog.scope(function() {',
            '  // TODO(brain): Take over the world.',
            '}); // goog.scope',
        ]
        expected = [
            'goog.scope(function() {',
            '// TODO(brain): Take over the world.',
            '}); // goog.scope',
        ]
        self._AssertFixes(original, expected)

    def testMissingEndOfScopeComment(self):
        """Tests Handling a missing comment at end of goog.scope."""
        original = [
            'goog.scope(function() {',
            '});',
        ]
        expected = [
            'goog.scope(function() {',
            '}); // goog.scope',
        ]
        self._AssertFixes(original, expected)

    def testMissingEndOfScopeCommentWithOtherComment(self):
        """Tests handling an irrelevant comment at end of goog.scope."""
        original = [
            'goog.scope(function() {',
            "}); // I don't belong here!",
        ]
        expected = [
            'goog.scope(function() {',
            '}); // goog.scope',
        ]
        self._AssertFixes(original, expected)

    def testMalformedEndOfScopeComment(self):
        """Tests Handling a malformed comment at end of goog.scope."""
        original = [
            'goog.scope(function() {',
            '}); // goog.scope FTW',
        ]
        expected = [
            'goog.scope(function() {',
            '}); // goog.scope',
        ]
        self._AssertFixes(original, expected)

    def _AssertFixes(self, original, expected):
        """Asserts that the error fixer corrects original to expected."""
        # Prepend the fake file header so the checker sees a complete file.
        original = self._GetHeader() + original
        expected = self._GetHeader() + expected
        actual = StringIO.StringIO()
        style_checker = checker.JavaScriptStyleChecker(
            error_fixer.ErrorFixer(actual))
        style_checker.CheckLines('testing.js', original, False)
        actual.seek(0)
        expected = [x + '\n' for x in expected]
        self.assertListEqual(actual.readlines(), expected)

    def _GetHeader(self):
        """Returns a fake header for a JavaScript file."""
        return [
            "// Copyright 2011 Google Inc. All Rights Reserved.",
            "",
            "/**",
            " * @fileoverview Fake file overview.",
            " * @author fake@google.com (Fake Person)",
            " */",
            ""
        ]
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    googletest.main()
| bsd-3-clause |
mancoast/CPythonPyc_test | cpython/275_test_descrtut.py | 75 | 12052 | # This contains most of the executable examples from Guido's descr
# tutorial, once at
#
# http://www.python.org/2.2/descrintro.html
#
# A few examples left implicit in the writeup were fleshed out, a few were
# skipped due to lack of interest (e.g., faking super() by hand isn't
# of much interest anymore), and a few were fiddled to make the output
# deterministic.
from test.test_support import sortdict
import pprint
class defaultdict(dict):
    """Tutorial dict subclass that returns a fixed default for missing keys.

    (Deliberately shadows the name used by collections.defaultdict: it is
    the example type exercised by the test_1 doctest.)
    """

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        # Fall back to the stored default instead of raising KeyError.
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        return self.default

    def get(self, key, *args):
        # With no explicit fallback, use the instance default.
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)

    def merge(self, other):
        # Copy only the keys we do not already have.
        for k in other:
            if k not in self:
                self[k] = other[k]
test_1 = """
Here's the new type at work:
>>> print defaultdict # show our type
<class 'test.test_descrtut.defaultdict'>
>>> print type(defaultdict) # its metatype
<type 'type'>
>>> a = defaultdict(default=0.0) # create an instance
>>> print a # show the instance
{}
>>> print type(a) # show its type
<class 'test.test_descrtut.defaultdict'>
>>> print a.__class__ # show its class
<class 'test.test_descrtut.defaultdict'>
>>> print type(a) is a.__class__ # its type is its class
True
>>> a[1] = 3.25 # modify the instance
>>> print a # show the new value
{1: 3.25}
>>> print a[1] # show the new item
3.25
>>> print a[0] # a non-existent item
0.0
>>> a.merge({1:100, 2:200}) # use a dict method
>>> print sortdict(a) # show the result
{1: 3.25, 2: 200}
>>>
We can also use the new type in contexts where classic only allows "real"
dictionaries, such as the locals/globals dictionaries for the exec
statement or the built-in function eval():
>>> def sorted(seq):
... seq.sort(key=str)
... return seq
>>> print sorted(a.keys())
[1, 2]
>>> exec "x = 3; print x" in a
3
>>> print sorted(a.keys())
[1, 2, '__builtins__', 'x']
>>> print a['x']
3
>>>
Now I'll show that defaultdict instances have dynamic instance variables,
just like classic classes:
>>> a.default = -1
>>> print a["noway"]
-1
>>> a.default = -1000
>>> print a["noway"]
-1000
>>> 'default' in dir(a)
True
>>> a.x1 = 100
>>> a.x2 = 200
>>> print a.x1
100
>>> d = dir(a)
>>> 'default' in d and 'x1' in d and 'x2' in d
True
>>> print sortdict(a.__dict__)
{'default': -1000, 'x1': 100, 'x2': 200}
>>>
"""
class defaultdict2(dict):
    """Like defaultdict above, but with __slots__: only the ``default``
    attribute exists, so arbitrary instance attributes cannot be set
    (exercised by the test_2 doctest)."""

    __slots__ = ['default']

    def __init__(self, default=None):
        dict.__init__(self)
        self.default = default

    def __getitem__(self, key):
        if dict.__contains__(self, key):
            return dict.__getitem__(self, key)
        return self.default

    def get(self, key, *args):
        if not args:
            args = (self.default,)
        return dict.get(self, key, *args)

    def merge(self, other):
        for k in other:
            if k not in self:
                self[k] = other[k]
test_2 = """
The __slots__ declaration takes a list of instance variables, and reserves
space for exactly these in the instance. When __slots__ is used, other
instance variables cannot be assigned to:
>>> a = defaultdict2(default=0.0)
>>> a[1]
0.0
>>> a.default = -1
>>> a[1]
-1
>>> a.x1 = 1
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'defaultdict2' object has no attribute 'x1'
>>>
"""
test_3 = """
Introspecting instances of built-in types
For instance of built-in types, x.__class__ is now the same as type(x):
>>> type([])
<type 'list'>
>>> [].__class__
<type 'list'>
>>> list
<type 'list'>
>>> isinstance([], list)
True
>>> isinstance([], dict)
False
>>> isinstance([], object)
True
>>>
Under the new proposal, the __methods__ attribute no longer exists:
>>> [].__methods__
Traceback (most recent call last):
File "<stdin>", line 1, in ?
AttributeError: 'list' object has no attribute '__methods__'
>>>
Instead, you can get the same information from the list type:
>>> pprint.pprint(dir(list)) # like list.__dict__.keys(), but sorted
['__add__',
'__class__',
'__contains__',
'__delattr__',
'__delitem__',
'__delslice__',
'__doc__',
'__eq__',
'__format__',
'__ge__',
'__getattribute__',
'__getitem__',
'__getslice__',
'__gt__',
'__hash__',
'__iadd__',
'__imul__',
'__init__',
'__iter__',
'__le__',
'__len__',
'__lt__',
'__mul__',
'__ne__',
'__new__',
'__reduce__',
'__reduce_ex__',
'__repr__',
'__reversed__',
'__rmul__',
'__setattr__',
'__setitem__',
'__setslice__',
'__sizeof__',
'__str__',
'__subclasshook__',
'append',
'count',
'extend',
'index',
'insert',
'pop',
'remove',
'reverse',
'sort']
The new introspection API gives more information than the old one: in
addition to the regular methods, it also shows the methods that are
normally invoked through special notations, e.g. __iadd__ (+=), __len__
(len), __ne__ (!=). You can invoke any method from this list directly:
>>> a = ['tic', 'tac']
>>> list.__len__(a) # same as len(a)
2
>>> a.__len__() # ditto
2
>>> list.append(a, 'toe') # same as a.append('toe')
>>> a
['tic', 'tac', 'toe']
>>>
This is just like it is for user-defined classes.
"""
test_4 = """
Static methods and class methods
The new introspection API makes it possible to add static methods and class
methods. Static methods are easy to describe: they behave pretty much like
static methods in C++ or Java. Here's an example:
>>> class C:
...
... @staticmethod
... def foo(x, y):
... print "staticmethod", x, y
>>> C.foo(1, 2)
staticmethod 1 2
>>> c = C()
>>> c.foo(1, 2)
staticmethod 1 2
Class methods use a similar pattern to declare methods that receive an
implicit first argument that is the *class* for which they are invoked.
>>> class C:
... @classmethod
... def foo(cls, y):
... print "classmethod", cls, y
>>> C.foo(1)
classmethod test.test_descrtut.C 1
>>> c = C()
>>> c.foo(1)
classmethod test.test_descrtut.C 1
>>> class D(C):
... pass
>>> D.foo(1)
classmethod test.test_descrtut.D 1
>>> d = D()
>>> d.foo(1)
classmethod test.test_descrtut.D 1
This prints "classmethod __main__.D 1" both times; in other words, the
class passed as the first argument of foo() is the class involved in the
call, not the class involved in the definition of foo().
But notice this:
>>> class E(C):
... @classmethod
... def foo(cls, y): # override C.foo
... print "E.foo() called"
... C.foo(y)
>>> E.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
>>> e = E()
>>> e.foo(1)
E.foo() called
classmethod test.test_descrtut.C 1
In this example, the call to C.foo() from E.foo() will see class C as its
first argument, not class E. This is to be expected, since the call
specifies the class C. But it stresses the difference between these class
methods and methods defined in metaclasses (where an upcall to a metamethod
would pass the target class as an explicit first argument).
"""
test_5 = """
Attributes defined by get/set methods
>>> class property(object):
...
... def __init__(self, get, set=None):
... self.__get = get
... self.__set = set
...
... def __get__(self, inst, type=None):
... return self.__get(inst)
...
... def __set__(self, inst, value):
... if self.__set is None:
... raise AttributeError, "this attribute is read-only"
... return self.__set(inst, value)
Now let's define a class with an attribute x defined by a pair of methods,
getx() and setx():
>>> class C(object):
...
... def __init__(self):
... self.__x = 0
...
... def getx(self):
... return self.__x
...
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
...
... x = property(getx, setx)
Here's a small demonstration:
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
Hmm -- property is builtin now, so let's try it that way too.
>>> del property # unmask the builtin
>>> property
<type 'property'>
>>> class C(object):
... def __init__(self):
... self.__x = 0
... def getx(self):
... return self.__x
... def setx(self, x):
... if x < 0: x = 0
... self.__x = x
... x = property(getx, setx)
>>> a = C()
>>> a.x = 10
>>> print a.x
10
>>> a.x = -10
>>> print a.x
0
>>>
"""
test_6 = """
Method resolution order
This example is implicit in the writeup.
>>> class A: # classic class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called A.save()
>>> class A(object): # new class
... def save(self):
... print "called A.save()"
>>> class B(A):
... pass
>>> class C(A):
... def save(self):
... print "called C.save()"
>>> class D(B, C):
... pass
>>> D().save()
called C.save()
"""
class A(object):
    # New-style root of the diamond used by the cooperative-super doctest
    # (test_7); every super().m() chain terminates here.
    def m(self):
        return "A"
class B(A):
    # Prepends "B" and forwards along the MRO via super().
    def m(self):
        return "B" + super(B, self).m()
class C(A):
    # Prepends "C" and forwards along the MRO via super().
    def m(self):
        return "C" + super(C, self).m()
class D(C, B):
    # Diamond tip: with MRO D -> C -> B -> A, m() yields "DCBA" as the
    # test_7 doctest expects.
    def m(self):
        return "D" + super(D, self).m()
test_7 = """
Cooperative methods and "super"
>>> print D().m() # "DCBA"
DCBA
"""
test_8 = """
Backwards incompatibilities
>>> class A:
... def foo(self):
... print "called A.foo()"
>>> class B(A):
... pass
>>> class C(A):
... def foo(self):
... B.foo(self)
>>> C().foo()
Traceback (most recent call last):
...
TypeError: unbound method foo() must be called with B instance as first argument (got C instance instead)
>>> class C(A):
... def foo(self):
... A.foo(self)
>>> C().foo()
called A.foo()
"""
# Doctest hook: run_doctest() discovers these named doctest bodies here.
__test__ = {"tut1": test_1,
            "tut2": test_2,
            "tut3": test_3,
            "tut4": test_4,
            "tut5": test_5,
            "tut6": test_6,
            "tut7": test_7,
            "tut8": test_8}
# Magic test name that regrtest.py invokes *after* importing this module.
# This worms around a bootstrap problem.
# Note that doctest and regrtest both look in sys.argv for a "-v" argument,
# so this works as expected in both ways of running regrtest.
def test_main(verbose=None):
    """Entry point that regrtest invokes after importing this module."""
    # Obscure: import this module as test.test_descrtut instead of as
    # plain test_descrtut because the name of this module works its way
    # into the doctest examples, and unless the full test.test_descrtut
    # business is used the name can change depending on how the test is
    # invoked.
    from test import test_support, test_descrtut
    test_support.run_doctest(test_descrtut, verbose)

# This part isn't needed for regrtest, but for running the test directly.
if __name__ == "__main__":
    test_main(1)
| gpl-3.0 |
appsembler/edx-platform | pavelib/paver_tests/test_eslint.py | 14 | 1731 | """
Tests for Paver's Stylelint tasks.
"""
import unittest
from mock import patch
from paver.easy import BuildFailure, call_task
import pavelib.quality
class TestPaverESLint(unittest.TestCase):
    """
    For testing run_eslint
    """

    def setUp(self):
        super(TestPaverESLint, self).setUp()

        # Mock the paver @needs decorator
        self._mock_paver_needs = patch.object(pavelib.quality.run_eslint, 'needs').start()
        self._mock_paver_needs.return_value = 0

        # Mock shell commands so no real eslint process is spawned
        patcher = patch('pavelib.quality.sh')
        self._mock_paver_sh = patcher.start()

        # Cleanup mocks
        self.addCleanup(patcher.stop)
        self.addCleanup(self._mock_paver_needs.stop)

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_count_from_last_line')
    def test_eslint_violation_number_not_found(self, mock_count, mock_report_dir, mock_write_metric):  # pylint: disable=unused-argument
        """
        run_eslint encounters an error parsing the eslint output log
        """
        # None simulates an unparseable log line; the task must abort.
        mock_count.return_value = None
        with self.assertRaises(BuildFailure):
            call_task('pavelib.quality.run_eslint', args=[''])

    @patch.object(pavelib.quality, '_write_metric')
    @patch.object(pavelib.quality, '_prepare_report_dir')
    @patch.object(pavelib.quality, '_get_count_from_last_line')
    def test_eslint_vanilla(self, mock_count, mock_report_dir, mock_write_metric):  # pylint: disable=unused-argument
        """
        eslint finds violations, but a limit was not set
        """
        # One violation with no limit configured should not raise.
        mock_count.return_value = 1
        pavelib.quality.run_eslint("")
| agpl-3.0 |
edublancas/python-ds-tools | tests/pipeline/test_pipeline.py | 2 | 5059 | import subprocess
from pathlib import Path
from dstools.pipeline.dag import DAG
from dstools.pipeline.tasks import BashCommand
from dstools.pipeline.products import File
# Subprocess options shared by every BashCommand in these tests: capture
# both output streams and execute through the shell.
kwargs = {'stderr': subprocess.PIPE,
          'stdout': subprocess.PIPE,
          'shell': True}
def test_non_existent_file():
    """A product that was never built is missing and code-outdated."""
    dag = DAG()
    product = File('file.txt')
    task = BashCommand('echo hi > {{product}}', product, dag, 'ta')
    task.render()
    # nothing has run yet, so the file cannot exist on disk
    assert not product.exists()
    # it is outdated because its code never ran, not because of upstream data
    assert product._outdated()
    assert product._outdated_code_dependency()
    assert not product._outdated_data_dependencies()
def test_outdated_data_simple_dependency(tmp_directory):
    """Chain A -> B: force-rebuilding A alone leaves only B outdated."""
    dag = DAG()
    path_a = Path('a.txt')
    path_b = Path('b.txt')
    ta = BashCommand('touch {{product}}', File(path_a), dag, 'ta', {},
                     kwargs, False)
    tb = BashCommand('cat {{upstream["ta"]}} > {{product}}', File(path_b),
                     dag, 'tb', {}, kwargs, False)
    ta >> tb
    ta.render()
    tb.render()
    # fresh DAG: neither product exists, both are outdated
    for task in (ta, tb):
        assert not task.product.exists()
        assert task.product._outdated()
    dag.build()
    dag._clear_cached_outdated_status()
    # after building, both products exist and are current
    for task in (ta, tb):
        assert task.product.exists()
        assert not task.product._outdated()
    # force-rebuilding the upstream task makes only the downstream stale
    ta.build(force=True)
    dag._clear_cached_outdated_status()
    assert not ta.product._outdated()
    assert tb.product._outdated()
def test_many_upstream(tmp_directory):
    """Fan-in {A, B} -> C: rebuilding either parent makes only C stale."""
    dag = DAG()
    ta = BashCommand('touch {{product}}', File(Path('a.txt')),
                     dag, 'ta', {}, kwargs, False)
    tb = BashCommand('touch {{product}} > {{product}}', File(Path('b.txt')),
                     dag, 'tb', {}, kwargs, False)
    tc = BashCommand('cat {{upstream["ta"]}} {{upstream["tb"]}} > {{product}}',
                     File(Path('c.txt')), dag, 'tc', {}, kwargs, False)
    (ta + tb) >> tc
    dag.build()
    # after a full build everything exists and is current
    for task in (ta, tb, tc):
        assert task.product.exists()
    for task in (ta, tb, tc):
        assert not task.product._outdated()
    # force-rebuild the first parent: only C becomes outdated
    ta.build(force=True)
    dag._clear_cached_outdated_status()
    assert not ta.product._outdated()
    assert not tb.product._outdated()
    assert tc.product._outdated()
    dag.build()
    # force-rebuild the second parent: again only C becomes outdated
    tb.build(force=True)
    dag._clear_cached_outdated_status()
    assert not ta.product._outdated()
    assert not tb.product._outdated()
    assert tc.product._outdated()
def test_many_downstream():
    """ A -> {B, C}
    """
    # TODO: fan-out scenario not implemented yet; kept as a placeholder so
    # the intended coverage is visible.
    pass
def test_chained_dependency():
    """ A -> B -> C
    """
    # TODO: three-task chain scenario not implemented yet; placeholder.
    pass
def test_can_create_task_with_many_products():
    """A task may declare several products, reported as a single unit."""
    dag = DAG()
    products = [File('a1.txt'), File('a2.txt')]
    task = BashCommand('echo {{product}}', products, dag, 'ta')
    task.render()
    # never built: missing, and outdated because of the code dependency
    assert not task.product.exists()
    assert task.product._outdated()
    assert task.product._outdated_code_dependency()
    assert not task.product._outdated_data_dependencies()
def test_overloaded_operators():
    """``>>`` chains tasks and records each link's upstream dependency."""
    dag = DAG()
    ta = BashCommand('touch {{product}}', File(Path('a.txt')), dag, 'ta')
    tb = BashCommand('touch {{product}}', File(Path('b.txt')), dag, 'tb')
    tc = BashCommand('touch {{product}}', File(Path('c.txt')), dag, 'tc')
    ta >> tb >> tc
    # the head of the chain has no upstream dependencies
    assert not ta.upstream
    # each subsequent task knows its immediate predecessor
    assert tb in tc.upstream.values()
    assert ta in tb.upstream.values()
def test_adding_tasks():
    """``+`` groups tasks preserving operand order, and is associative."""
    dag = DAG()
    ta = BashCommand('touch {{product}}', File(Path('a.txt')), dag, 'ta')
    tb = BashCommand('touch {{product}}', File(Path('b.txt')), dag, 'tb')
    tc = BashCommand('touch {{product}}', File(Path('c.txt')), dag, 'tc')
    # operand order is the order of the resulting group
    assert list((ta + tb).tasks) == [ta, tb]
    assert list((tb + ta).tasks) == [tb, ta]
    # grouping is associative regardless of parenthesization
    assert list((ta + tb + tc).tasks) == [ta, tb, tc]
    assert list(((ta + tb) + tc).tasks) == [ta, tb, tc]
    assert list((ta + (tb + tc)).tasks) == [ta, tb, tc]
def test_adding_tasks_left():
    """A task group on the left of ``>>`` fans in to the right-hand task."""
    dag = DAG()
    ta = BashCommand('touch {{product}}', File(Path('a.txt')), dag, 'ta')
    tb = BashCommand('touch {{product}}', File(Path('b.txt')), dag, 'tb')
    tc = BashCommand('touch {{product}}', File(Path('c.txt')), dag, 'tc')
    (ta + tb) >> tc
    # the grouped tasks gain no upstream of their own...
    assert not ta.upstream
    assert not tb.upstream
    # ...while the target depends on both members, in order
    assert list(tc.upstream.values()) == [ta, tb]
def test_adding_tasks_right():
    """A task group on the right of ``>>`` fans out from the left task."""
    dag = DAG()
    ta = BashCommand('touch {{product}}', File(Path('a.txt')), dag, 'ta')
    tb = BashCommand('touch {{product}}', File(Path('b.txt')), dag, 'tb')
    tc = BashCommand('touch {{product}}', File(Path('c.txt')), dag, 'tc')
    ta >> (tb + tc)
    # the source keeps no upstream; every group member depends on it
    assert not ta.upstream
    assert list(tb.upstream.values()) == [ta]
    assert list(tc.upstream.values()) == [ta]
| mit |
ryfeus/lambda-packs | Lxml_requests/source/google/protobuf/text_encoding.py | 152 | 4617 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Encoding related utilities."""
import re
import six
# Lookup table for utf8: identity mapping (index -> chr(index)) with the
# C-style escapes patched in below.
_cescape_utf8_to_str = [chr(i) for i in range(0, 256)]
_cescape_utf8_to_str[9] = r'\t' # optional escape
_cescape_utf8_to_str[10] = r'\n' # optional escape
_cescape_utf8_to_str[13] = r'\r' # optional escape
_cescape_utf8_to_str[39] = r"\'" # optional escape
_cescape_utf8_to_str[34] = r'\"' # necessary escape
_cescape_utf8_to_str[92] = r'\\' # necessary escape
# Lookup table for non-utf8, with necessary escapes at (o >= 127 or o < 32):
# control and high-bit bytes become octal escapes, printable ASCII passes
# through, then the same C-style escapes are patched in.
_cescape_byte_to_str = ([r'\%03o' % i for i in range(0, 32)] +
                        [chr(i) for i in range(32, 127)] +
                        [r'\%03o' % i for i in range(127, 256)])
_cescape_byte_to_str[9] = r'\t' # optional escape
_cescape_byte_to_str[10] = r'\n' # optional escape
_cescape_byte_to_str[13] = r'\r' # optional escape
_cescape_byte_to_str[39] = r"\'" # optional escape
_cescape_byte_to_str[34] = r'\"' # necessary escape
_cescape_byte_to_str[92] = r'\\' # necessary escape
def CEscape(text, as_utf8):
  """Escape a bytes string for use in an ascii protocol buffer.

  text.encode('string_escape') does not seem to satisfy our needs as it
  encodes unprintable characters using two-digit hex escapes whereas our
  C++ unescaping function allows hex escapes to be any length.  So,
  "\0011".encode('string_escape') ends up being "\\x011", which will be
  decoded in C++ as a single-character string with char code 0x11.

  Args:
    text: A byte string to be escaped
    as_utf8: Specifies if result should be returned in UTF-8 encoding

  Returns:
    Escaped string
  """
  # PY3 hack: on str (and PY2 unicode/basestring) inputs the characters must
  # be converted to ordinals; iterating bytes on PY3 already yields ints.
  ordinal = ord if isinstance(text, six.string_types) else (lambda c: c)
  # Pick the lookup table once, then translate character by character.
  table = _cescape_utf8_to_str if as_utf8 else _cescape_byte_to_str
  return ''.join(table[ordinal(c)] for c in text)
# Matches a hex escape (\x or \\\x, etc.) followed by exactly one hex digit;
# group 1 captures the run of backslashes, group 2 the digit.
_CUNESCAPE_HEX = re.compile(r'(\\+)x([0-9a-fA-F])(?![0-9a-fA-F])')
# ASCII passes through unchanged; high-bit bytes become octal escapes.
_cescape_highbit_to_str = ([chr(i) for i in range(0, 127)] +
                           [r'\%03o' % i for i in range(127, 256)])
def CUnescape(text):
  """Unescape a text string with C-style escape sequences to UTF-8 bytes."""
  def ReplaceHex(m):
    # Only replace the match if the number of leading back slashes is odd. i.e.
    # the slash itself is not escaped.
    if len(m.group(1)) & 1:
      return m.group(1) + 'x0' + m.group(2)
    return m.group(0)
  # This is required because the 'string_escape' encoding doesn't
  # allow single-digit hex escapes (like '\xf').
  result = _CUNESCAPE_HEX.sub(ReplaceHex, text)
  if str is bytes: # PY2
    return result.decode('string_escape')
  # PY3 path: escape high-bit characters to octal first so the ascii
  # encode below cannot fail, then round-trip through unicode_escape to
  # interpret the C-style escapes, ending with bytes.
  result = ''.join(_cescape_highbit_to_str[ord(c)] for c in result)
  return (result.encode('ascii') # Make it bytes to allow decode.
          .decode('unicode_escape')
          # Make it bytes again to return the proper type.
          .encode('raw_unicode_escape'))
| mit |
york-2015/yowsup | yowsup/layers/protocol_iq/protocolentities/iq_error.py | 61 | 1645 | from yowsup.structs import ProtocolTreeNode
from .iq import IqProtocolEntity
class ErrorIqProtocolEntity(IqProtocolEntity):
    """Represents an error-result iq stanza, for example:

    <iq id="1417113419-0" from="{{jid}}" type="error">
        <error text="not-acceptable" code="406" backoff="3600">
        </error>
    </iq>
    """

    def __init__(self, _id, _from, code, text, backoff=0):
        super(ErrorIqProtocolEntity, self).__init__(xmlns=None, _id=_id, _type="error", _from=_from)
        self.setErrorProps(code, text, backoff)

    def setErrorProps(self, code, text, backoff):
        # backoff may arrive as a string attribute (or None); normalize to
        # an int, with 0 meaning "no backoff requested".
        self.code = code
        self.text = text
        self.backoff = int(backoff) if backoff else 0

    def toProtocolTreeNode(self):
        node = super(ErrorIqProtocolEntity, self).toProtocolTreeNode()
        error_node = ProtocolTreeNode("error", {"text": self.text, "code": self.code})
        # the backoff attribute is only emitted when one was set
        if self.backoff:
            error_node.setAttribute("backoff", str(self.backoff))
        node.addChild(error_node)
        return node

    def __str__(self):
        parts = [
            super(ErrorIqProtocolEntity, self).__str__(),
            "Code: %s\n" % self.code,
            "Text: %s\n" % self.text,
            "Backoff: %s\n" % self.backoff,
        ]
        return "".join(parts)

    @staticmethod
    def fromProtocolTreeNode(node):
        # Build the generic iq entity first, then specialize it in place.
        entity = IqProtocolEntity.fromProtocolTreeNode(node)
        entity.__class__ = ErrorIqProtocolEntity
        error_node = node.getChild("error")
        entity.setErrorProps(error_node.getAttributeValue("code"),
                             error_node.getAttributeValue("text"),
                             error_node.getAttributeValue("backoff"))
        return entity
| gpl-3.0 |
mdanielwork/intellij-community | python/lib/Lib/site-packages/django/db/models/manager.py | 306 | 7872 | from django.utils import copycompat as copy
from django.conf import settings
from django.db import router
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query, RawQuerySet
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
def ensure_default_manager(sender, **kwargs):
    """
    Ensures that a Model subclass contains a default manager and sets the
    _default_manager attribute on the class. Also sets up the _base_manager
    points to a plain Manager instance (which could be the same as
    _default_manager if it's not a subclass of Manager).
    """
    cls = sender
    # Abstract models never get managers of their own.
    if cls._meta.abstract:
        return
    if not getattr(cls, '_default_manager', None):
        # Create the default manager, if needed.
        try:
            # A field named 'objects' would collide with the manager we are
            # about to attach, so the model must supply its own manager name.
            cls._meta.get_field('objects')
            raise ValueError("Model %s must specify a custom Manager, because it has a field named 'objects'" % cls.__name__)
        except FieldDoesNotExist:
            pass
        cls.add_to_class('objects', Manager())
        cls._base_manager = cls.objects
    elif not getattr(cls, '_base_manager', None):
        default_mgr = cls._default_manager.__class__
        if (default_mgr is Manager or
                getattr(default_mgr, "use_for_related_fields", False)):
            cls._base_manager = cls._default_manager
        else:
            # Default manager isn't a plain Manager class, or a suitable
            # replacement, so we walk up the base class hierarchy until we hit
            # something appropriate.
            for base_class in default_mgr.mro()[1:]:
                if (base_class is Manager or
                        getattr(base_class, "use_for_related_fields", False)):
                    cls.add_to_class('_base_manager', base_class())
                    return
            raise AssertionError("Should never get here. Please report a bug, including your model and model manager setup.")
# Run the manager check once for every model class Django finishes preparing.
signals.class_prepared.connect(ensure_default_manager)
class Manager(object):
    """
    Table-level database access point for a model.

    A Manager is attached to a model class (via contribute_to_class) and
    proxies nearly all of its public API to a fresh QuerySet, so subclasses
    can customize behavior by overriding get_query_set() alone.
    """
    # Tracks each time a Manager instance is created. Used to retain order.
    creation_counter = 0
    def __init__(self):
        super(Manager, self).__init__()
        self._set_creation_counter()
        self.model = None
        self._inherited = False
        # Explicit database alias; None means "let the router decide".
        self._db = None
    def contribute_to_class(self, model, name):
        # TODO: Use weakref because of possible memory leak / circular reference.
        self.model = model
        # Install a descriptor so the manager is reachable on the class only.
        setattr(model, name, ManagerDescriptor(self))
        # The earliest-created manager wins as the default.
        if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
            model._default_manager = self
        if model._meta.abstract or (self._inherited and not self.model._meta.proxy):
            model._meta.abstract_managers.append((self.creation_counter, name,
                self))
        else:
            model._meta.concrete_managers.append((self.creation_counter, name,
                self))
    def _set_creation_counter(self):
        """
        Sets the creation counter value for this instance and increments the
        class-level copy.
        """
        self.creation_counter = Manager.creation_counter
        Manager.creation_counter += 1
    def _copy_to_model(self, model):
        """
        Makes a copy of the manager and assigns it to 'model', which should be
        a child of the existing model (used when inheriting a manager from an
        abstract base class).
        """
        assert issubclass(model, self.model)
        mgr = copy.copy(self)
        mgr._set_creation_counter()
        mgr.model = model
        mgr._inherited = True
        return mgr
    def db_manager(self, using):
        # Return a shallow copy bound to a specific database alias.
        obj = copy.copy(self)
        obj._db = using
        return obj
    @property
    def db(self):
        # Explicit alias wins; otherwise ask the router for a read database.
        return self._db or router.db_for_read(self.model)
    #######################
    # PROXIES TO QUERYSET #
    #######################
    # Each method below simply delegates to a freshly-built QuerySet so that
    # overriding get_query_set() customizes all of them at once.
    def get_empty_query_set(self):
        return EmptyQuerySet(self.model, using=self._db)
    def get_query_set(self):
        """Returns a new QuerySet object.  Subclasses can override this method
        to easily customize the behavior of the Manager.
        """
        return QuerySet(self.model, using=self._db)
    def none(self):
        return self.get_empty_query_set()
    def all(self):
        return self.get_query_set()
    def count(self):
        return self.get_query_set().count()
    def dates(self, *args, **kwargs):
        return self.get_query_set().dates(*args, **kwargs)
    def distinct(self, *args, **kwargs):
        return self.get_query_set().distinct(*args, **kwargs)
    def extra(self, *args, **kwargs):
        return self.get_query_set().extra(*args, **kwargs)
    def get(self, *args, **kwargs):
        return self.get_query_set().get(*args, **kwargs)
    def get_or_create(self, **kwargs):
        return self.get_query_set().get_or_create(**kwargs)
    def create(self, **kwargs):
        return self.get_query_set().create(**kwargs)
    def filter(self, *args, **kwargs):
        return self.get_query_set().filter(*args, **kwargs)
    def aggregate(self, *args, **kwargs):
        return self.get_query_set().aggregate(*args, **kwargs)
    def annotate(self, *args, **kwargs):
        return self.get_query_set().annotate(*args, **kwargs)
    def complex_filter(self, *args, **kwargs):
        return self.get_query_set().complex_filter(*args, **kwargs)
    def exclude(self, *args, **kwargs):
        return self.get_query_set().exclude(*args, **kwargs)
    def in_bulk(self, *args, **kwargs):
        return self.get_query_set().in_bulk(*args, **kwargs)
    def iterator(self, *args, **kwargs):
        return self.get_query_set().iterator(*args, **kwargs)
    def latest(self, *args, **kwargs):
        return self.get_query_set().latest(*args, **kwargs)
    def order_by(self, *args, **kwargs):
        return self.get_query_set().order_by(*args, **kwargs)
    def select_related(self, *args, **kwargs):
        return self.get_query_set().select_related(*args, **kwargs)
    def values(self, *args, **kwargs):
        return self.get_query_set().values(*args, **kwargs)
    def values_list(self, *args, **kwargs):
        return self.get_query_set().values_list(*args, **kwargs)
    def update(self, *args, **kwargs):
        return self.get_query_set().update(*args, **kwargs)
    def reverse(self, *args, **kwargs):
        return self.get_query_set().reverse(*args, **kwargs)
    def defer(self, *args, **kwargs):
        return self.get_query_set().defer(*args, **kwargs)
    def only(self, *args, **kwargs):
        return self.get_query_set().only(*args, **kwargs)
    def using(self, *args, **kwargs):
        return self.get_query_set().using(*args, **kwargs)
    def exists(self, *args, **kwargs):
        return self.get_query_set().exists(*args, **kwargs)
    # Internal helpers used by save()/update paths, not public API.
    def _insert(self, values, **kwargs):
        return insert_query(self.model, values, **kwargs)
    def _update(self, values, **kwargs):
        return self.get_query_set()._update(values, **kwargs)
    def raw(self, raw_query, params=None, *args, **kwargs):
        return RawQuerySet(raw_query=raw_query, model=self.model, params=params, using=self._db, *args, **kwargs)
class ManagerDescriptor(object):
    """
    Descriptor that exposes a Manager on the model *class* only.

    Managers are table-level API, so access through an instance is forbidden:
    ``Poll.objects`` works, but ``poll_obj.objects`` raises AttributeError.
    """
    def __init__(self, manager):
        self.manager = manager
    def __get__(self, instance, type=None):
        # Identity check (`is not None`) instead of `!= None`: the latter
        # invokes the instance's __ne__, which a model may override, and is
        # also non-idiomatic for None comparisons (PEP 8).
        if instance is not None:
            raise AttributeError("Manager isn't accessible via %s instances" % type.__name__)
        return self.manager
class EmptyManager(Manager):
    """Manager whose query set is always empty."""
    def get_query_set(self):
        # Delegate to the base-class helper that builds an EmptyQuerySet.
        return self.get_empty_query_set()
| apache-2.0 |
degustaf/blog | rabbithole_python/rabbithole_python/settings.py | 1 | 2778 | """
Django settings for rabbithole_python project.
Generated by 'django-admin startproject' using Django 1.8.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import rabbithole_python.secret_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the commented-out key below remains in version-control
# history; rotate it if it was ever used in a deployed environment.
#SECRET_KEY = '#$y=kxow(@x=eqmd&=hvwos0w$4jp^3=gfgh)cd1(jv@r$#%ch'
SECRET_KEY = rabbithole_python.secret_settings.SECRET_KEY
# SECURITY WARNING: don't run with debug turned on in production!
# NOTE(review): DEBUG is True and ALLOWED_HOSTS is empty -- fine for local
# development only; both must change before deploying.
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
)
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'rabbithole_python.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'rabbithole_python.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
| apache-2.0 |
Sylrob434/CouchPotatoServer | libs/suds/properties.py | 203 | 16223 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Properties classes.
"""
from logging import getLogger
log = getLogger(__name__)
class AutoLinker(object):
    """
    Base class, provides interface for I{automatic} link
    management between a L{Properties} object and the L{Properties}
    contained within I{values}.
    """
    def updated(self, properties, prev, next):
        """
        Notification that a value was updated and the linkage
        between the I{properties} contained within I{prev} needs to
        be relinked to the L{Properties} contained within the
        I{next} value.  The default implementation does nothing.
        """
        pass
class Link(object):
    """
    Property link object.
    @ivar endpoints: A tuple of the (2) endpoints of the link.
    @type endpoints: tuple(2)
    """
    def __init__(self, a, b):
        """
        @param a: Property (A) to link.
        @type a: L{Property}
        @param b: Property (B) to link.
        @type b: L{Property}
        """
        # Each side stores an endpoint wrapping the *other* side, making the
        # link bidirectional.
        pA = Endpoint(self, a)
        pB = Endpoint(self, b)
        self.endpoints = (pA, pB)
        self.validate(a, b)
        a.links.append(pB)
        b.links.append(pA)
    def validate(self, pA, pB):
        """
        Validate that the two properties may be linked.
        @param pA: Endpoint (A) to link.
        @type pA: L{Endpoint}
        @param pB: Endpoint (B) to link.
        @type pB: L{Endpoint}
        @return: self
        @rtype: L{Link}
        """
        if pA in pB.links or \
            pB in pA.links:
                raise Exception, 'Already linked'
        dA = pA.domains()
        dB = pB.domains()
        # NOTE(review): the two domain loops (and the two key loops below)
        # each check the same pairs in both directions -- harmless but
        # redundant; one direction would suffice.
        for d in dA:
            if d in dB:
                raise Exception, 'Duplicate domain "%s" found' % d
        for d in dB:
            if d in dA:
                raise Exception, 'Duplicate domain "%s" found' % d
        kA = pA.keys()
        kB = pB.keys()
        for k in kA:
            if k in kB:
                raise Exception, 'Duplicate key %s found' % k
        for k in kB:
            if k in kA:
                raise Exception, 'Duplicate key %s found' % k
        return self
    def teardown(self):
        """
        Teardown the link.
        Removes endpoints from properties I{links} collection.
        @return: self
        @rtype: L{Link}
        """
        pA, pB = self.endpoints
        if pA in pB.links:
            pB.links.remove(pA)
        if pB in pA.links:
            pA.links.remove(pB)
        return self
class Endpoint(object):
    """
    Link endpoint (wrapper).
    @ivar link: The associated link.
    @type link: L{Link}
    @ivar target: The properties object.
    @type target: L{Property}
    """
    def __init__(self, link, target):
        self.link = link
        self.target = target
    def teardown(self):
        # Tearing down an endpoint tears down the whole link.
        return self.link.teardown()
    def __eq__(self, rhs):
        # Equality (and hashing below) delegate to the wrapped target so an
        # endpoint can stand in for its Properties object in containers.
        return ( self.target == rhs )
    def __hash__(self):
        return hash(self.target)
    def __getattr__(self, name):
        # Transparent attribute delegation to the wrapped target.
        return getattr(self.target, name)
class Definition:
    """
    Property definition.
    @ivar name: The property name.
    @type name: str
    @ivar classes: The (class) list of permitted values
    @type classes: tuple
    @ivar default: The default value.
    @type default: any
    """
    def __init__(self, name, classes, default, linker=AutoLinker()):
        """
        @param name: The property name.
        @type name: str
        @param classes: The (class) list of permitted values
        @type classes: tuple
        @param default: The default value.
        @type default: any
        """
        # Normalize a single class into a one-element tuple so validate()
        # can always pass it to isinstance().
        if not isinstance(classes, (list, tuple)):
            classes = (classes,)
        self.name = name
        self.classes = classes
        self.default = default
        self.linker = linker
    def nvl(self, value=None):
        """
        Convert the I{value} into the default when I{None}.
        @param value: The proposed value.
        @type value: any
        @return: The I{default} when I{value} is I{None}, else I{value}.
        @rtype: any
        """
        if value is None:
            return self.default
        else:
            return value
    def validate(self, value):
        """
        Validate the I{value} is of the correct class.
        @param value: The value to validate.
        @type value: any
        @raise AttributeError: When I{value} is invalid.
        """
        # None is always allowed (it means "use the default").
        if value is None:
            return
        # An empty classes tuple means any type is permitted.
        if len(self.classes) and \
            not isinstance(value, self.classes):
                msg = '"%s" must be: %s' % (self.name, self.classes)
                raise AttributeError,msg
    def __repr__(self):
        return '%s: %s' % (self.name, str(self))
    def __str__(self):
        s = []
        if len(self.classes):
            s.append('classes=%s' % str(self.classes))
        else:
            s.append('classes=*')
        s.append("default=%s" % str(self.default))
        return ', '.join(s)
class Properties:
    """
    Represents basic application properties.
    Provides basic type validation, default values and
    link/synchronization behavior.
    @ivar domain: The domain name.
    @type domain: str
    @ivar definitions: A table of property definitions.
    @type definitions: {name: L{Definition}}
    @ivar links: A list of linked property objects used to create
        a network of properties.
    @type links: [L{Property},..]
    @ivar defined: A dict of property values.
    @type defined: dict
    """
    def __init__(self, domain, definitions, kwargs):
        """
        @param domain: The property domain name.
        @type domain: str
        @param definitions: A table of property definitions.
        @type definitions: {name: L{Definition}}
        @param kwargs: A list of property name/values to set.
        @type kwargs: dict
        """
        self.definitions = {}
        for d in definitions:
            self.definitions[d.name] = d
        self.domain = domain
        self.links = []
        self.defined = {}
        self.modified = set()
        self.prime()
        self.update(kwargs)
    def definition(self, name):
        """
        Get the definition for the property I{name}.
        @param name: The property I{name} to find the definition for.
        @type name: str
        @return: The property definition
        @rtype: L{Definition}
        @raise AttributeError: On not found.
        """
        d = self.definitions.get(name)
        if d is None:
            raise AttributeError(name)
        return d
    def update(self, other):
        """
        Update the property values as specified by keyword/value.
        @param other: An object to update from.
        @type other: (dict|L{Properties})
        @return: self
        @rtype: L{Properties}
        """
        if isinstance(other, Properties):
            other = other.defined
        for n,v in other.items():
            self.set(n, v)
        return self
    def notset(self, name):
        """
        Get whether a property has never been set by I{name}.
        @param name: A property name.
        @type name: str
        @return: True if never been set.
        @rtype: bool
        """
        # Bug fix: the result of __notset() was computed but never returned,
        # so this method always yielded None (falsy) regardless of state.
        return self.provider(name).__notset(name)
    def set(self, name, value):
        """
        Set the I{value} of a property by I{name}.
        The value is validated against the definition and set
        to the default when I{value} is None.
        @param name: The property name.
        @type name: str
        @param value: The new property value.
        @type value: any
        @return: self
        @rtype: L{Properties}
        """
        self.provider(name).__set(name, value)
        return self
    def unset(self, name):
        """
        Unset a property by I{name}.
        @param name: A property name.
        @type name: str
        @return: self
        @rtype: L{Properties}
        """
        self.provider(name).__set(name, None)
        return self
    def get(self, name, *df):
        """
        Get the value of a property by I{name}.
        @param name: The property name.
        @type name: str
        @param df: An optional value to be returned when the value
            is not set
        @type df: [1].
        @return: The stored value, or I{df[0]} if not set.
        @rtype: any
        """
        return self.provider(name).__get(name, *df)
    def link(self, other):
        """
        Link (associate) this object with anI{other} properties object
        to create a network of properties. Links are bidirectional.
        @param other: The object to link.
        @type other: L{Properties}
        @return: self
        @rtype: L{Properties}
        """
        Link(self, other)
        return self
    def unlink(self, *others):
        """
        Unlink (disassociate) the specified properties object.
        @param others: The list object to unlink. Unspecified means unlink all.
        @type others: [L{Properties},..]
        @return: self
        @rtype: L{Properties}
        """
        if not len(others):
            others = self.links[:]
        for p in self.links[:]:
            if p in others:
                p.teardown()
        return self
    def provider(self, name, history=None):
        """
        Find the provider of the property by I{name}.
        @param name: The property name.
        @type name: str
        @param history: A history of nodes checked to prevent
            circular hunting.
        @type history: [L{Properties},..]
        @return: The provider when found.  Otherwise, None (when nested)
            and I{self} when not nested.
        @rtype: L{Properties}
        """
        if history is None:
            history = []
        history.append(self)
        if name in self.definitions:
            return self
        # Depth-first search of the link network; history guards cycles.
        for x in self.links:
            if x in history:
                continue
            provider = x.provider(name, history)
            if provider is not None:
                return provider
        history.remove(self)
        # Only the outermost (non-nested) call falls back to self.
        if len(history):
            return None
        return self
    def keys(self, history=None):
        """
        Get the set of I{all} property names.
        @param history: A history of nodes checked to prevent
            circular hunting.
        @type history: [L{Properties},..]
        @return: A set of property names.
        @rtype: list
        """
        if history is None:
            history = []
        history.append(self)
        keys = set()
        keys.update(self.definitions.keys())
        for x in self.links:
            if x in history:
                continue
            keys.update(x.keys(history))
        history.remove(self)
        return keys
    def domains(self, history=None):
        """
        Get the set of I{all} domain names.
        @param history: A history of nodes checked to prevent
            circular hunting.
        @type history: [L{Properties},..]
        @return: A set of domain names.
        @rtype: list
        """
        if history is None:
            history = []
        history.append(self)
        domains = set()
        domains.add(self.domain)
        for x in self.links:
            if x in history:
                continue
            domains.update(x.domains(history))
        history.remove(self)
        return domains
    def prime(self):
        """
        Prime the stored values based on default values
        found in property definitions.
        @return: self
        @rtype: L{Properties}
        """
        for d in self.definitions.values():
            self.defined[d.name] = d.default
        return self
    def __notset(self, name):
        # Never modified => never set.
        return not (name in self.modified)
    def __set(self, name, value):
        d = self.definition(name)
        d.validate(value)
        value = d.nvl(value)
        prev = self.defined[name]
        self.defined[name] = value
        self.modified.add(name)
        # Give the linker a chance to relink nested Properties values.
        d.linker.updated(self, prev, value)
    def __get(self, name, *df):
        d = self.definition(name)
        value = self.defined.get(name)
        # A value equal to the default counts as "not set" for df purposes.
        if value == d.default and len(df):
            value = df[0]
        return value
    def str(self, history):
        s = []
        s.append('Definitions:')
        for d in self.definitions.values():
            s.append('\t%s' % repr(d))
        s.append('Content:')
        for d in self.defined.items():
            s.append('\t%s' % str(d))
        if self not in history:
            history.append(self)
            s.append('Linked:')
            for x in self.links:
                s.append(x.str(history))
            history.remove(self)
        return '\n'.join(s)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return self.str([])
class Skin(object):
    """
    The meta-programming I{skin} around the L{Properties} object,
    exposing properties as plain attributes.
    @ivar __pts__: The wrapped object.
    @type __pts__: L{Properties}.
    """
    def __init__(self, domain, definitions, kwargs):
        self.__pts__ = Properties(domain, definitions, kwargs)
    def __setattr__(self, name, value):
        # Dunder attributes (including __pts__ itself) bypass the property
        # machinery and go straight into the instance dict.
        builtin =  name.startswith('__') and name.endswith('__')
        if builtin:
            self.__dict__[name] = value
            return
        self.__pts__.set(name, value)
    def __getattr__(self, name):
        # Only called for names not found normally; resolve via properties.
        return self.__pts__.get(name)
    def __repr__(self):
        return str(self)
    def __str__(self):
        return str(self.__pts__)
class Unskin(object):
    """Constructor-style accessor: ``Unskin(skin)`` yields the wrapped
    L{Properties} object rather than an Unskin instance."""
    def __new__(self, *args, **kwargs):
        return args[0].__pts__
class Inspector:
    """
    Wrapper inspector: operates on a L{Skin}-wrapped options object
    through its underlying L{Properties}.
    """
    def __init__(self, options):
        self.properties = options.__pts__
    def get(self, name, *df):
        """
        Get the value of a property by I{name}.
        @param name: The property name.
        @type name: str
        @param df: An optional value to be returned when the value
            is not set
        @type df: [1].
        @return: The stored value, or I{df[0]} if not set.
        @rtype: any
        """
        return self.properties.get(name, *df)
    def update(self, **kwargs):
        """
        Update the property values as specified by keyword/value.
        @param kwargs: A list of property name/values to set.
        @type kwargs: dict
        @return: self
        @rtype: L{Properties}
        """
        # Bug fix: Properties.update() takes a single dict/Properties
        # argument; forwarding with **kwargs raised TypeError for any
        # keyword.  Pass the collected kwargs dict positionally instead.
        return self.properties.update(kwargs)
    def link(self, other):
        """
        Link (associate) this object with anI{other} properties object
        to create a network of properties. Links are bidirectional.
        @param other: The object to link.
        @type other: L{Properties}
        @return: self
        @rtype: L{Properties}
        """
        p = other.__pts__
        return self.properties.link(p)
    def unlink(self, other):
        """
        Unlink (disassociate) the specified properties object.
        @param other: The object to unlink.
        @type other: L{Properties}
        @return: self
        @rtype: L{Properties}
        """
        p = other.__pts__
        return self.properties.unlink(p)
| gpl-3.0 |
fduraffourg/servo | tests/wpt/harness/wptrunner/environment.py | 99 | 7294 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import multiprocessing
import signal
import socket
import sys
import time
from mozlog import get_default_logger, handlers
from wptlogging import LogLevelRewriter
here = os.path.split(__file__)[0]
serve = None
sslutils = None
hostnames = ["web-platform.test",
"www.web-platform.test",
"www1.web-platform.test",
"www2.web-platform.test",
"xn--n8j6ds53lwwkrqhv28a.web-platform.test",
"xn--lve-6lad.web-platform.test"]
def do_delayed_imports(logger, test_paths):
    """Import the serve/sslutils helper modules that live inside the
    web-platform-tests checkout, binding them to module globals.

    Deferred to runtime because the tests path is only known then.
    Exits the process if the checkout does not provide the modules.
    """
    global serve, sslutils

    serve_root = serve_path(test_paths)

    sys.path.insert(0, serve_root)

    failed = []

    try:
        from tools.serve import serve
    except ImportError:
        failed.append("serve")

    try:
        import sslutils
    except ImportError:
        failed.append("sslutils")

    if failed:
        logger.critical(
            "Failed to import %s. Ensure that tests path %s contains web-platform-tests" %
            (", ".join(failed), serve_root))
        sys.exit(1)
def serve_path(test_paths):
    """Return the web-platform-tests checkout root (the tests path that is
    mounted at "/")."""
    root_entry = test_paths["/"]
    return root_entry["tests_path"]
def get_ssl_kwargs(**kwargs):
    """Select the subset of *kwargs* required by the chosen ssl backend."""
    wanted = {
        "openssl": ("openssl_binary",),
        "pregenerated": ("host_key_path", "host_cert_path", "ca_cert_path"),
    }
    names = wanted.get(kwargs["ssl_type"], ())
    return dict((name, kwargs[name]) for name in names)
def ssl_env(logger, **kwargs):
    """Instantiate the ssl environment class selected by kwargs["ssl_type"].

    Requires do_delayed_imports() to have bound the module-global sslutils.
    """
    ssl_env_cls = sslutils.environments[kwargs["ssl_type"]]
    return ssl_env_cls(logger, **get_ssl_kwargs(**kwargs))
class TestEnvironmentError(Exception):
    """Raised when the test environment cannot be set up."""
    pass
class TestEnvironment(object):
    """Context manager that owns the test environment i.e. the http and
    websockets servers."""

    def __init__(self, test_paths, ssl_env, pause_after_test, debug_info, options):
        self.test_paths = test_paths
        self.ssl_env = ssl_env
        self.server = None
        self.config = None
        self.external_config = None
        self.pause_after_test = pause_after_test
        self.debug_info = debug_info
        # Normalise options *before* reading from it.  The previous code
        # popped "test_server_port" from options before the None check, so
        # passing options=None crashed despite the guard below.
        self.options = options if options is not None else {}
        self.test_server_port = self.options.pop("test_server_port", True)

        self.cache_manager = multiprocessing.Manager()
        self.stash = serve.stash.StashServer()

    def __enter__(self):
        # Bring up the stash server, ssl environment and cache manager, then
        # start the wptserve servers described by the merged config.
        self.stash.__enter__()
        self.ssl_env.__enter__()
        self.cache_manager.__enter__()
        self.setup_server_logging()
        self.config = self.load_config()
        serve.set_computed_defaults(self.config)
        self.external_config, self.servers = serve.start(self.config, self.ssl_env,
                                                         self.get_routes())
        if self.options.get("supports_debugger") and self.debug_info and self.debug_info.interactive:
            self.ignore_interrupts()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.process_interrupts()

        for scheme, servers in self.servers.iteritems():
            for port, server in servers:
                server.kill()
        self.cache_manager.__exit__(exc_type, exc_val, exc_tb)
        self.ssl_env.__exit__(exc_type, exc_val, exc_tb)
        self.stash.__exit__()

    def ignore_interrupts(self):
        # Leave SIGINT to the interactive debugger session.
        signal.signal(signal.SIGINT, signal.SIG_IGN)

    def process_interrupts(self):
        # Restore default SIGINT handling.
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    def load_config(self):
        """Merge the wpt default config, the local config template and the
        runtime options into a single server configuration dict."""
        default_config_path = os.path.join(serve_path(self.test_paths), "config.default.json")
        local_config_path = os.path.join(here, "config.json")

        with open(default_config_path) as f:
            default_config = json.load(f)

        with open(local_config_path) as f:
            data = f.read()
            # The local config is a %-template filled from the options dict.
            local_config = json.loads(data % self.options)

        #TODO: allow non-default configuration for ssl
        local_config["external_host"] = self.options.get("external_host", None)
        local_config["ssl"]["encrypt_after_connect"] = self.options.get("encrypt_after_connect", False)

        config = serve.merge_json(default_config, local_config)
        config["doc_root"] = serve_path(self.test_paths)

        if not self.ssl_env.ssl_enabled:
            config["ports"]["https"] = [None]

        host = self.options.get("certificate_domain", config["host"])
        hosts = [host]
        hosts.extend("%s.%s" % (item[0], host) for item in serve.get_subdomains(host).values())
        key_file, certificate = self.ssl_env.host_cert_path(hosts)

        config["key_file"] = key_file
        config["certificate"] = certificate

        return config

    def setup_server_logging(self):
        """Route wptserve's logging through our logger, downgrading server
        errors to warnings."""
        server_logger = get_default_logger(component="wptserve")
        assert server_logger is not None
        log_filter = handlers.LogLevelFilter(lambda x: x, "info")
        # Downgrade errors to warnings for the server
        log_filter = LogLevelRewriter(log_filter, ["error"], "warning")
        server_logger.component_filter = log_filter

        try:
            #Set as the default logger for wptserve
            serve.set_logger(server_logger)
            serve.logger = server_logger
        except Exception:
            # This happens if logging has already been set up for wptserve
            pass

    def get_routes(self):
        """Build the wptserve routes: the static runner/report files plus a
        mount point for every configured test tree."""
        route_builder = serve.RoutesBuilder()

        for path, format_args, content_type, route in [
                ("testharness_runner.html", {}, "text/html", "/testharness_runner.html"),
                (self.options.get("testharnessreport", "testharnessreport.js"),
                 {"output": self.pause_after_test}, "text/javascript",
                 "/resources/testharnessreport.js")]:
            path = os.path.normpath(os.path.join(here, path))
            route_builder.add_static(path, format_args, content_type, route)

        for url_base, paths in self.test_paths.iteritems():
            if url_base == "/":
                continue
            route_builder.add_mount_point(url_base, paths["tests_path"])

        if "/" not in self.test_paths:
            del route_builder.mountpoint_routes["/"]

        return route_builder.get_routes()

    def ensure_started(self):
        """Probe each started server and raise EnvironmentError if any of
        them failed to come up."""
        # Pause for a while to ensure that the server has a chance to start
        time.sleep(2)
        for scheme, servers in self.servers.iteritems():
            for port, server in servers:
                if self.test_server_port:
                    s = socket.socket()
                    try:
                        s.connect((self.config["host"], port))
                    except socket.error:
                        raise EnvironmentError(
                            "%s server on port %d failed to start" % (scheme, port))
                    finally:
                        s.close()

                if not server.is_alive():
                    raise EnvironmentError("%s server on port %d failed to start" % (scheme, port))
| mpl-2.0 |
ariebovenberg/snug | src/snug/pagination.py | 2 | 4726 | """Tools for creating paginated queries
.. versionadded:: 1.2
"""
import abc
import typing as t
from operator import attrgetter
from .query import Query, async_executor, executor
__all__ = ["paginated", "Page", "Pagelike"]
T = t.TypeVar("T")
class Pagelike(t.Generic[T]):
    """Abstract base class for page-like objects.

    Any object implementing the attributes
    :py:attr:`~Pagelike.content` and :py:attr:`~Pagelike.next_query`
    implements this interface.
    A query returning such an object may be :class:`paginated`.

    Note
    ----
    Pagelike is a :class:`~typing.Generic`.
    This means you may write ``Pagelike[<type-of-content>]``
    as a descriptive type annotation.
    For example: ``Pagelike[List[str]]`` indicates a page-like
    object whose ``content`` is a list of strings.
    """
    __slots__ = ()

    # ``abc.abstractproperty`` is deprecated since Python 3.3; stacking
    # ``@property`` over ``@abc.abstractmethod`` is the supported spelling
    # and behaves identically.
    @property
    @abc.abstractmethod
    def content(self):
        """The contents of the page.

        Returns
        -------
        T
            The page content.
        """
        raise NotImplementedError()

    @property
    @abc.abstractmethod
    def next_query(self):
        """The query to retrieve the next page,
        or ``None`` if there is no next page.

        Returns
        -------
        ~snug.Query[Pagelike[T]]] or None
            The next query.
        """
        raise NotImplementedError()


class Page(Pagelike[T]):
    """A simple :class:`Pagelike` object

    Parameters
    ----------
    content: T
        The page content.
    next_query: ~snug.Query[Pagelike[T]]] or None
        The query to retrieve the next page.
    """
    __slots__ = "_content", "_next_query"

    def __init__(self, content, next_query=None):
        self._content, self._next_query = content, next_query

    # Read-only views of the slot attributes.
    content = property(attrgetter("_content"))
    next_query = property(attrgetter("_next_query"))

    def __repr__(self):
        return "Page({})".format(self._content)
class paginated(Query[t.Union[t.Iterator[T], t.AsyncIterator[T]]]):
    """A paginated version of a query.
    Executing it returns an :term:`iterator`
    or :term:`async iterator <asynchronous iterator>`.

    If the wrapped query is reusable,
    the paginated query is also reusable.

    Parameters
    ----------
    query: Query[Pagelike[T]]
        The query to paginate.
        This query must return a :class:`Pagelike` object.

    Example
    -------

    .. code-block:: python

        def foo_page(...) -> Query[Pagelike[Foo]]  # example query
            ...
            return Page(...)

        query = paginated(foo_page(...))

        for foo in execute(query):
            ...

        async for foo in execute_async(query):
            ...
    """
    # Only the wrapped first-page query is stored.
    __slots__ = "_query"

    def __init__(self, query):
        self._query = query

    def __execute__(self, client, auth):
        """Execute the paginated query.

        Returns
        -------
        ~typing.Iterator[T]
            An iterator yielding page content.
        """
        # Bind client/auth into an executor; Paginator drives the page chain.
        return Paginator(self._query, executor(client=client, auth=auth))

    def __execute_async__(self, client, auth):
        """Execute the paginated query asynchronously.

        Note
        ----
        This method does not need to be awaited.

        Returns
        -------
        ~typing.AsyncIterator[T]
            An asynchronous iterator yielding page content.
        """
        return AsyncPaginator(
            self._query, async_executor(client=client, auth=auth)
        )

    def __repr__(self):
        return "paginated({})".format(self._query)
class Paginator(t.Iterator[T]):
    """Iterator that walks the chain of page queries, executing each one
    and yielding the resulting page's content."""

    __slots__ = "_executor", "_next_query"

    def __init__(self, next_query, executor):
        self._next_query = next_query
        self._executor = executor

    def __iter__(self):
        return self

    def __next__(self):
        query = self._next_query
        if query is None:
            # No further pages: the previous page had no next_query.
            raise StopIteration()
        page = self._executor(query)
        self._next_query = page.next_query
        return page.content
class AsyncPaginator(t.AsyncIterator[T]):
    """Asynchronous counterpart of the paginator: awaits each query in the
    page sequence and yields the page content."""

    __slots__ = "_executor", "_next_query"

    def __init__(self, next_query, executor):
        self._next_query = next_query
        self._executor = executor

    def __aiter__(self):
        return self

    async def __anext__(self):
        """the content of the next page"""
        query = self._next_query
        if query is None:
            # No further pages remain.
            raise StopAsyncIteration()
        page = await self._executor(query)
        self._next_query = page.next_query
        return page.content
| mit |
MikeBirdsall/food-log | foodlog/create_db.py | 1 | 1868 | #!/usr/bin/env python3
""" Create database from ini files
I started out using ini files as the first input and primary backup method
for the data. I'm changing that to be more database centric, with the
backup being done by a file containing a log of the sql statements used to
update the database. This version will create the database, but also save
a copy of the sql used to create it.
"""
import os
import argparse
import sqlite3
from foodlog.my_info import config_path
class ConstructDatabase:
    """Create the food-log sqlite database from tables.sql and mirror the
    DDL into a plain-text sql log file."""

    def __init__(self, args):
        config = config_path()
        sqlite_file = args.sqlite_file or config.dir("DB_FILE")
        log_file = args.log_file or config.dir("DB_LOG")
        if os.path.exists(sqlite_file):
            # Truncate any existing database so creation starts clean.
            open(sqlite_file, 'w').close()
        self.conn = sqlite3.connect(sqlite_file)
        self.cursor = self.conn.cursor()
        self.log_file = open(log_file, 'w')

    def process(self):
        """Create the schema, commit, and release all resources."""
        try:
            self.create_tables()
            self.conn.commit()
        finally:
            # Previously the connection was closed but the log file handle
            # was leaked; close both, even if table creation fails.
            self.conn.close()
            self.log_file.close()

    def create_tables(self):
        """Create tables from schema in file tables.sql."""
        with open('tables.sql', 'r') as schema_file:
            schema = schema_file.read()
        self.conn.executescript(schema)
        # Keep a copy of the executed DDL as the sql backup log.
        print(schema, file=self.log_file)
def main():
    """ Commandline program to create food diary database from ini files """
    parser = argparse.ArgumentParser(
        description="create sqlite file from ini files")
    # NOTE(review): input_paths is accepted but not read by
    # ConstructDatabase in this file — possibly a leftover from the
    # ini-file-based flow described in the module docstring; confirm.
    parser.add_argument("input_paths", type=str, nargs="*",
        help="path to ini files")
    parser.add_argument("--sqlite_file", '-d', type=str,
        help="output database file")
    parser.add_argument("--log_file", '-l', type=str,
        help="output sql log file")
    args = parser.parse_args()

    ConstructDatabase(args).process()
| mit |
magenta/magenta | magenta/models/sketch_rnn/model.py | 1 | 17487 | # Copyright 2021 The Magenta Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Sketch-RNN Model."""
import random
from magenta.contrib import training as contrib_training
from magenta.models.sketch_rnn import rnn
import numpy as np
import tensorflow.compat.v1 as tf
def copy_hparams(hparams):
  """Return a copy of an HParams instance."""
  # values() yields a plain dict of hyperparameters; re-wrapping it builds
  # an independent HParams object.
  return contrib_training.HParams(**hparams.values())
def get_default_hparams():
  """Return default HParams for sketch-rnn.

  Covers data, training-loop, encoder/decoder architecture, VAE/KL,
  dropout, data-augmentation and mode settings.
  """
  hparams = contrib_training.HParams(
      data_set=['aaron_sheep.npz'],  # Our dataset.
      num_steps=10000000,  # Total number of steps of training. Keep large.
      save_every=500,  # Number of batches per checkpoint creation.
      max_seq_len=250,  # Not used. Will be changed by model. [Eliminate?]
      dec_rnn_size=512,  # Size of decoder.
      dec_model='lstm',  # Decoder: lstm, layer_norm or hyper.
      enc_rnn_size=256,  # Size of encoder.
      enc_model='lstm',  # Encoder: lstm, layer_norm or hyper.
      z_size=128,  # Size of latent vector z. Recommend 32, 64 or 128.
      kl_weight=0.5,  # KL weight of loss equation. Recommend 0.5 or 1.0.
      kl_weight_start=0.01,  # KL start weight when annealing.
      kl_tolerance=0.2,  # Level of KL loss at which to stop optimizing for KL.
      batch_size=100,  # Minibatch size. Recommend leaving at 100.
      grad_clip=1.0,  # Gradient clipping. Recommend leaving at 1.0.
      num_mixture=20,  # Number of mixtures in Gaussian mixture model.
      learning_rate=0.001,  # Learning rate.
      decay_rate=0.9999,  # Learning rate decay per minibatch.
      kl_decay_rate=0.99995,  # KL annealing decay rate per minibatch.
      min_learning_rate=0.00001,  # Minimum learning rate.
      use_recurrent_dropout=True,  # Dropout with memory loss. Recommended
      recurrent_dropout_prob=0.90,  # Probability of recurrent dropout keep.
      use_input_dropout=False,  # Input dropout. Recommend leaving False.
      input_dropout_prob=0.90,  # Probability of input dropout keep.
      use_output_dropout=False,  # Output dropout. Recommend leaving False.
      output_dropout_prob=0.90,  # Probability of output dropout keep.
      random_scale_factor=0.15,  # Random scaling data augmentation proportion.
      augment_stroke_prob=0.10,  # Point dropping augmentation proportion.
      conditional=True,  # When False, use unconditional decoder-only model.
      is_training=True  # Is model training? Recommend keeping true.
  )
  return hparams
class Model(object):
"""Define a SketchRNN model."""
  def __init__(self, hps, gpu_mode=True, reuse=False):
    """Initializer for the SketchRNN model.

    Args:
       hps: a HParams object containing model hyperparameters
       gpu_mode: a boolean that when True, uses GPU mode.
       reuse: a boolean that when true, attempts to reuse variables.
    """
    self.hps = hps
    # All variables live under one scope so multiple Model instances
    # (e.g. train/eval/sample) can share weights via `reuse`.
    with tf.variable_scope('vector_rnn', reuse=reuse):
      if not gpu_mode:
        # Pin graph construction to the CPU when GPU mode is disabled.
        with tf.device('/cpu:0'):
          tf.logging.info('Model using cpu.')
          self.build_model(hps)
      else:
        tf.logging.info('Model using gpu.')
        self.build_model(hps)
  def encoder(self, batch, sequence_lengths):
    """Define the bi-directional encoder module of sketch-rnn.

    Args:
      batch: batch of input stroke sequences.
      sequence_lengths: actual length of each sequence in the batch.

    Returns:
      A (mu, presig) pair parameterizing the latent vector z; build_model
      derives sigma from presig via exp(presig / 2).
    """
    unused_outputs, last_states = tf.nn.bidirectional_dynamic_rnn(
        self.enc_cell_fw,
        self.enc_cell_bw,
        batch,
        sequence_length=sequence_lengths,
        time_major=False,
        swap_memory=True,
        dtype=tf.float32,
        scope='ENC_RNN')

    last_state_fw, last_state_bw = last_states
    last_h_fw = self.enc_cell_fw.get_output(last_state_fw)
    last_h_bw = self.enc_cell_bw.get_output(last_state_bw)
    # Concatenate the final hidden states of both directions.
    last_h = tf.concat([last_h_fw, last_h_bw], 1)
    mu = rnn.super_linear(
        last_h,
        self.hps.z_size,
        input_size=self.hps.enc_rnn_size * 2,  # bi-dir, so x2
        scope='ENC_RNN_mu',
        init_w='gaussian',
        weight_start=0.001)
    presig = rnn.super_linear(
        last_h,
        self.hps.z_size,
        input_size=self.hps.enc_rnn_size * 2,  # bi-dir, so x2
        scope='ENC_RNN_sigma',
        init_w='gaussian',
        weight_start=0.001)
    return mu, presig
  def build_model(self, hps):
    """Define model architecture.

    Builds (optionally) the VAE encoder, the mixture-density decoder RNN,
    the reconstruction + KL losses and, in training mode, the optimizer.
    """
    if hps.is_training:
      self.global_step = tf.Variable(0, name='global_step', trainable=False)

    # Map the configured cell names onto cell classes for the decoder.
    if hps.dec_model == 'lstm':
      cell_fn = rnn.LSTMCell
    elif hps.dec_model == 'layer_norm':
      cell_fn = rnn.LayerNormLSTMCell
    elif hps.dec_model == 'hyper':
      cell_fn = rnn.HyperLSTMCell
    else:
      assert False, 'please choose a respectable cell'

    # Same mapping for the encoder cell.
    if hps.enc_model == 'lstm':
      enc_cell_fn = rnn.LSTMCell
    elif hps.enc_model == 'layer_norm':
      enc_cell_fn = rnn.LayerNormLSTMCell
    elif hps.enc_model == 'hyper':
      enc_cell_fn = rnn.HyperLSTMCell
    else:
      assert False, 'please choose a respectable cell'

    use_recurrent_dropout = self.hps.use_recurrent_dropout
    use_input_dropout = self.hps.use_input_dropout
    use_output_dropout = self.hps.use_output_dropout

    cell = cell_fn(
        hps.dec_rnn_size,
        use_recurrent_dropout=use_recurrent_dropout,
        dropout_keep_prob=self.hps.recurrent_dropout_prob)

    if hps.conditional:  # vae mode:
      # NOTE(review): both branches below are currently identical; the
      # 'hyper' special case looks vestigial.
      if hps.enc_model == 'hyper':
        self.enc_cell_fw = enc_cell_fn(
            hps.enc_rnn_size,
            use_recurrent_dropout=use_recurrent_dropout,
            dropout_keep_prob=self.hps.recurrent_dropout_prob)
        self.enc_cell_bw = enc_cell_fn(
            hps.enc_rnn_size,
            use_recurrent_dropout=use_recurrent_dropout,
            dropout_keep_prob=self.hps.recurrent_dropout_prob)
      else:
        self.enc_cell_fw = enc_cell_fn(
            hps.enc_rnn_size,
            use_recurrent_dropout=use_recurrent_dropout,
            dropout_keep_prob=self.hps.recurrent_dropout_prob)
        self.enc_cell_bw = enc_cell_fn(
            hps.enc_rnn_size,
            use_recurrent_dropout=use_recurrent_dropout,
            dropout_keep_prob=self.hps.recurrent_dropout_prob)

    # dropout:
    tf.logging.info('Input dropout mode = %s.', use_input_dropout)
    tf.logging.info('Output dropout mode = %s.', use_output_dropout)
    tf.logging.info('Recurrent dropout mode = %s.', use_recurrent_dropout)
    if use_input_dropout:
      tf.logging.info('Dropout to input w/ keep_prob = %4.4f.',
                      self.hps.input_dropout_prob)
      cell = tf.nn.rnn_cell.DropoutWrapper(
          cell, input_keep_prob=self.hps.input_dropout_prob)
    if use_output_dropout:
      tf.logging.info('Dropout to output w/ keep_prob = %4.4f.',
                      self.hps.output_dropout_prob)
      cell = tf.nn.rnn_cell.DropoutWrapper(
          cell, output_keep_prob=self.hps.output_dropout_prob)
    self.cell = cell

    self.sequence_lengths = tf.placeholder(
        dtype=tf.int32, shape=[self.hps.batch_size])
    # Stroke-5 format: each step is (dx, dy, p1, p2, p3).
    self.input_data = tf.placeholder(
        dtype=tf.float32,
        shape=[self.hps.batch_size, self.hps.max_seq_len + 1, 5])

    # The target/expected vectors of strokes
    self.output_x = self.input_data[:, 1:self.hps.max_seq_len + 1, :]
    # vectors of strokes to be fed to decoder (same as above, but lagged behind
    # one step to include initial dummy value of (0, 0, 1, 0, 0))
    self.input_x = self.input_data[:, :self.hps.max_seq_len, :]

    # either do vae-bit and get z, or do unconditional, decoder-only
    if hps.conditional:  # vae mode:
      self.mean, self.presig = self.encoder(self.output_x,
                                            self.sequence_lengths)
      self.sigma = tf.exp(self.presig / 2.0)  # sigma > 0. div 2.0 -> sqrt.
      # Reparameterization trick: z = mu + sigma * eps.
      eps = tf.random_normal(
          (self.hps.batch_size, self.hps.z_size), 0.0, 1.0, dtype=tf.float32)
      self.batch_z = self.mean + tf.multiply(self.sigma, eps)
      # KL cost
      self.kl_cost = -0.5 * tf.reduce_mean(
          (1 + self.presig - tf.square(self.mean) - tf.exp(self.presig)))
      self.kl_cost = tf.maximum(self.kl_cost, self.hps.kl_tolerance)
      # Concatenate z onto every decoder input timestep.
      pre_tile_y = tf.reshape(self.batch_z,
                              [self.hps.batch_size, 1, self.hps.z_size])
      overlay_x = tf.tile(pre_tile_y, [1, self.hps.max_seq_len, 1])
      actual_input_x = tf.concat([self.input_x, overlay_x], 2)
      # Initial decoder state is a learned projection of z.
      self.initial_state = tf.nn.tanh(
          rnn.super_linear(
              self.batch_z,
              cell.state_size,
              init_w='gaussian',
              weight_start=0.001,
              input_size=self.hps.z_size))
    else:  # unconditional, decoder-only generation
      self.batch_z = tf.zeros(
          (self.hps.batch_size, self.hps.z_size), dtype=tf.float32)
      self.kl_cost = tf.zeros([], dtype=tf.float32)
      actual_input_x = self.input_x
      self.initial_state = cell.zero_state(
          batch_size=hps.batch_size, dtype=tf.float32)

    self.num_mixture = hps.num_mixture

    # TODO(deck): Better understand this comment.
    # Number of outputs is 3 (one logit per pen state) plus 6 per mixture
    # component: mean_x, stdev_x, mean_y, stdev_y, correlation_xy, and the
    # mixture weight/probability (Pi_k)
    n_out = (3 + self.num_mixture * 6)

    with tf.variable_scope('RNN'):
      output_w = tf.get_variable('output_w', [self.hps.dec_rnn_size, n_out])
      output_b = tf.get_variable('output_b', [n_out])

    # decoder module of sketch-rnn is below
    output, last_state = tf.nn.dynamic_rnn(
        cell,
        actual_input_x,
        initial_state=self.initial_state,
        time_major=False,
        swap_memory=True,
        dtype=tf.float32,
        scope='RNN')

    # Project every timestep to the MDN parameter vector.
    output = tf.reshape(output, [-1, hps.dec_rnn_size])
    output = tf.nn.xw_plus_b(output, output_w, output_b)
    self.final_state = last_state

    # NB: the below are inner functions, not methods of Model
    def tf_2d_normal(x1, x2, mu1, mu2, s1, s2, rho):
      """Returns result of eq # 24 of http://arxiv.org/abs/1308.0850."""
      norm1 = tf.subtract(x1, mu1)
      norm2 = tf.subtract(x2, mu2)
      s1s2 = tf.multiply(s1, s2)
      # eq 25
      z = (tf.square(tf.div(norm1, s1)) + tf.square(tf.div(norm2, s2)) -
           2 * tf.div(tf.multiply(rho, tf.multiply(norm1, norm2)), s1s2))
      neg_rho = 1 - tf.square(rho)
      result = tf.exp(tf.div(-z, 2 * neg_rho))
      denom = 2 * np.pi * tf.multiply(s1s2, tf.sqrt(neg_rho))
      result = tf.div(result, denom)
      return result

    def get_lossfunc(z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr,
                     z_pen_logits, x1_data, x2_data, pen_data):
      """Returns a loss fn based on eq #26 of http://arxiv.org/abs/1308.0850."""
      # This represents the L_R only (i.e. does not include the KL loss term).

      result0 = tf_2d_normal(x1_data, x2_data, z_mu1, z_mu2, z_sigma1, z_sigma2,
                             z_corr)
      epsilon = 1e-6
      # result1 is the loss wrt pen offset (L_s in equation 9 of
      # https://arxiv.org/pdf/1704.03477.pdf)
      result1 = tf.multiply(result0, z_pi)
      result1 = tf.reduce_sum(result1, 1, keep_dims=True)
      result1 = -tf.log(result1 + epsilon)  # avoid log(0)

      fs = 1.0 - pen_data[:, 2]  # use training data for this
      fs = tf.reshape(fs, [-1, 1])
      # Zero out loss terms beyond N_s, the last actual stroke
      result1 = tf.multiply(result1, fs)

      # result2: loss wrt pen state, (L_p in equation 9)
      result2 = tf.nn.softmax_cross_entropy_with_logits(
          labels=pen_data, logits=z_pen_logits)
      result2 = tf.reshape(result2, [-1, 1])
      if not self.hps.is_training:  # eval mode, mask eos columns
        result2 = tf.multiply(result2, fs)

      result = result1 + result2
      return result

    # below is where we need to do MDN (Mixture Density Network) splitting of
    # distribution params
    def get_mixture_coef(output):
      """Returns the tf slices containing mdn dist params."""
      # This uses eqns 18 -> 23 of http://arxiv.org/abs/1308.0850.
      z = output
      z_pen_logits = z[:, 0:3]  # pen states
      z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr = tf.split(z[:, 3:], 6, 1)

      # process output z's into MDN parameters

      # softmax all the pi's and pen states:
      z_pi = tf.nn.softmax(z_pi)
      z_pen = tf.nn.softmax(z_pen_logits)

      # exponentiate the sigmas and also make corr between -1 and 1.
      z_sigma1 = tf.exp(z_sigma1)
      z_sigma2 = tf.exp(z_sigma2)
      z_corr = tf.tanh(z_corr)

      r = [z_pi, z_mu1, z_mu2, z_sigma1, z_sigma2, z_corr, z_pen, z_pen_logits]
      return r

    out = get_mixture_coef(output)
    [o_pi, o_mu1, o_mu2, o_sigma1, o_sigma2, o_corr, o_pen, o_pen_logits] = out

    self.pi = o_pi
    self.mu1 = o_mu1
    self.mu2 = o_mu2
    self.sigma1 = o_sigma1
    self.sigma2 = o_sigma2
    self.corr = o_corr
    self.pen_logits = o_pen_logits
    # pen state probabilities (result of applying softmax to self.pen_logits)
    self.pen = o_pen

    # reshape target data so that it is compatible with prediction shape
    target = tf.reshape(self.output_x, [-1, 5])
    [x1_data, x2_data, eos_data, eoc_data, cont_data] = tf.split(target, 5, 1)
    pen_data = tf.concat([eos_data, eoc_data, cont_data], 1)

    lossfunc = get_lossfunc(o_pi, o_mu1, o_mu2, o_sigma1, o_sigma2, o_corr,
                            o_pen_logits, x1_data, x2_data, pen_data)

    self.r_cost = tf.reduce_mean(lossfunc)

    if self.hps.is_training:
      self.lr = tf.Variable(self.hps.learning_rate, trainable=False)
      optimizer = tf.train.AdamOptimizer(self.lr)

      self.kl_weight = tf.Variable(self.hps.kl_weight_start, trainable=False)
      self.cost = self.r_cost + self.kl_cost * self.kl_weight

      gvs = optimizer.compute_gradients(self.cost)
      g = self.hps.grad_clip
      # Clip each gradient elementwise to [-g, g] before applying.
      capped_gvs = [(tf.clip_by_value(grad, -g, g), var) for grad, var in gvs]
      self.train_op = optimizer.apply_gradients(
          capped_gvs, global_step=self.global_step, name='train_step')
def sample(sess, model, seq_len=250, temperature=1.0, greedy_mode=False,
           z=None):
  """Samples a sequence from a pre-trained model.

  Args:
    sess: an active TF session holding the model's variables.
    model: a sampling Model (expects batch_size 1 inputs).
    seq_len: number of stroke steps to generate.
    temperature: softmax/Gaussian temperature; lower is more deterministic.
    greedy_mode: if True, always take the most likely mixture/pen state.
    z: optional latent vector; a random z is drawn if None (ignored by
      unconditional models).

  Returns:
    A (strokes, mixture_params) pair: the sampled stroke-5 array of shape
    [seq_len, 5] and the per-step MDN parameters.
  """

  def adjust_temp(pi_pdf, temp):
    # Re-weight a categorical pdf by temperature and renormalize.
    pi_pdf = np.log(pi_pdf) / temp
    pi_pdf -= pi_pdf.max()
    pi_pdf = np.exp(pi_pdf)
    pi_pdf /= pi_pdf.sum()
    return pi_pdf

  def get_pi_idx(x, pdf, temp=1.0, greedy=False):
    """Samples from a pdf, optionally greedily."""
    if greedy:
      return np.argmax(pdf)
    pdf = adjust_temp(np.copy(pdf), temp)
    accumulate = 0
    for i in range(0, pdf.size):
      accumulate += pdf[i]
      if accumulate >= x:
        return i
    tf.logging.info('Error with sampling ensemble.')
    return -1

  def sample_gaussian_2d(mu1, mu2, s1, s2, rho, temp=1.0, greedy=False):
    # Draw (dx, dy) from the selected bivariate Gaussian component;
    # temperature scales the covariance.
    if greedy:
      return mu1, mu2
    mean = [mu1, mu2]
    s1 *= temp * temp
    s2 *= temp * temp
    cov = [[s1 * s1, rho * s1 * s2], [rho * s1 * s2, s2 * s2]]
    x = np.random.multivariate_normal(mean, cov, 1)
    return x[0][0], x[0][1]

  prev_x = np.zeros((1, 1, 5), dtype=np.float32)
  prev_x[0, 0, 2] = 1  # initially, we want to see beginning of new stroke
  if z is None:
    z = np.random.randn(1, model.hps.z_size)  # not used if unconditional

  if not model.hps.conditional:
    prev_state = sess.run(model.initial_state)
  else:
    prev_state = sess.run(model.initial_state, feed_dict={model.batch_z: z})

  strokes = np.zeros((seq_len, 5), dtype=np.float32)
  mixture_params = []
  greedy = greedy_mode
  temp = temperature

  # Autoregressive loop: feed each sampled step back in as the next input.
  for i in range(seq_len):
    if not model.hps.conditional:
      feed = {
          model.input_x: prev_x,
          model.sequence_lengths: [1],
          model.initial_state: prev_state
      }
    else:
      feed = {
          model.input_x: prev_x,
          model.sequence_lengths: [1],
          model.initial_state: prev_state,
          model.batch_z: z
      }

    params = sess.run([
        model.pi, model.mu1, model.mu2, model.sigma1, model.sigma2, model.corr,
        model.pen, model.final_state
    ], feed)

    [o_pi, o_mu1, o_mu2, o_sigma1, o_sigma2, o_corr, o_pen, next_state] = params

    # Pick a mixture component and a pen state, then sample the offset.
    idx = get_pi_idx(random.random(), o_pi[0], temp, greedy)

    idx_eos = get_pi_idx(random.random(), o_pen[0], temp, greedy)
    eos = [0, 0, 0]
    eos[idx_eos] = 1

    next_x1, next_x2 = sample_gaussian_2d(o_mu1[0][idx], o_mu2[0][idx],
                                          o_sigma1[0][idx], o_sigma2[0][idx],
                                          o_corr[0][idx], np.sqrt(temp), greedy)

    strokes[i, :] = [next_x1, next_x2, eos[0], eos[1], eos[2]]

    params = [
        o_pi[0], o_mu1[0], o_mu2[0], o_sigma1[0], o_sigma2[0], o_corr[0],
        o_pen[0]
    ]

    mixture_params.append(params)

    prev_x = np.zeros((1, 1, 5), dtype=np.float32)
    prev_x[0][0] = np.array(
        [next_x1, next_x2, eos[0], eos[1], eos[2]], dtype=np.float32)
    prev_state = next_state

  return strokes, mixture_params
| apache-2.0 |
Eric-Zhong/odoo | addons/l10n_th/__openerp__.py | 260 | 1453 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Thailand - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
Chart of Accounts for Thailand.
===============================
Thai accounting chart and localization.
""",
'author': 'Almacom',
'website': 'http://almacom.co.th/',
'depends': ['account_chart'],
'data': [ 'account_data.xml' ],
'installable': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EricMuller/mynotes-backend | requirements/twisted/Twisted-17.1.0/build/lib.linux-x86_64-3.5/twisted/python/lockfile.py | 15 | 7718 | # -*- test-case-name: twisted.test.test_lockfile -*-
# Copyright (c) 2005 Divmod, Inc.
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Filesystem-based interprocess mutex.
"""
from __future__ import absolute_import, division
import errno
import os
from time import time as _uniquefloat
from twisted.python.runtime import platform
from twisted.python.compat import _PY3
def unique():
    """
    Return a string token derived from the current time in milliseconds,
    used to build (mostly) unique temporary link names.
    """
    millis = int(_uniquefloat() * 1000)
    return str(millis)
from os import rename

# Platform shim: on POSIX, use the real symlink primitives; on Windows,
# emulate them (see the explanatory comment block below).
if not platform.isWindows():
    from os import kill
    from os import symlink
    from os import readlink
    from os import remove as rmlink
    _windows = False
else:
    _windows = True

    # On UNIX, a symlink can be made to a nonexistent location, and
    # FilesystemLock uses this by making the target of the symlink an
    # imaginary, non-existing file named that of the PID of the process with
    # the lock. This has some benefits on UNIX -- making and removing this
    # symlink is atomic. However, because Windows doesn't support symlinks (at
    # least as how we know them), we have to fake this and actually write a
    # file with the PID of the process holding the lock instead.
    # These functions below perform that unenviable, probably-fraught-with-
    # race-conditions duty. - hawkie

    try:
        from win32api import OpenProcess
        import pywintypes
    except ImportError:
        kill = None
    else:
        ERROR_ACCESS_DENIED = 5
        ERROR_INVALID_PARAMETER = 87

        def kill(pid, signal):
            # Emulate the POSIX kill(pid, 0) liveness probe: return quietly
            # if the process exists, raise OSError(ESRCH) if it does not.
            try:
                OpenProcess(0, 0, pid)
            except pywintypes.error as e:
                if e.args[0] == ERROR_ACCESS_DENIED:
                    # Process exists but we may not open it -- it is alive.
                    return
                elif e.args[0] == ERROR_INVALID_PARAMETER:
                    raise OSError(errno.ESRCH, None)
                raise
            else:
                # OpenProcess with desired access 0 is expected to fail.
                raise RuntimeError("OpenProcess is required to fail.")

    # For monkeypatching in tests
    _open = open


    def symlink(value, filename):
        """
        Write a file at C{filename} with the contents of C{value}. See the
        above comment block as to why this is needed.
        """
        # XXX Implement an atomic thingamajig for win32
        newlinkname = filename + "." + unique() + '.newlink'
        newvalname = os.path.join(newlinkname, "symlink")
        os.mkdir(newlinkname)

        # Python 3 does not support the 'commit' flag of fopen in the MSVCRT
        # (http://msdn.microsoft.com/en-us/library/yeby3zcb%28VS.71%29.aspx)
        if _PY3:
            mode = 'w'
        else:
            mode = 'wc'

        with _open(newvalname, mode) as f:
            f.write(value)
            f.flush()

        try:
            rename(newlinkname, filename)
        except:
            # Clean up the temporary directory before re-raising.
            os.remove(newvalname)
            os.rmdir(newlinkname)
            raise


    def readlink(filename):
        """
        Read the contents of C{filename}. See the above comment block as to
        why this is needed.
        """
        try:
            fObj = _open(os.path.join(filename, 'symlink'), 'r')
        except IOError as e:
            if e.errno == errno.ENOENT or e.errno == errno.EIO:
                # Map missing/in-flux lock files onto the OSError the POSIX
                # readlink() would raise.
                raise OSError(e.errno, None)
            raise
        else:
            with fObj:
                result = fObj.read()
            return result


    def rmlink(filename):
        # Remove the fake-symlink file and its containing directory.
        os.remove(os.path.join(filename, 'symlink'))
        os.rmdir(filename)
class FilesystemLock(object):
    """
    A mutex.

    This relies on the filesystem property that creating
    a symlink is an atomic operation and that it will
    fail if the symlink already exists.  Deleting the
    symlink will release the lock.

    @ivar name: The name of the file associated with this lock.

    @ivar clean: Indicates whether this lock was released cleanly by its
        last owner.  Only meaningful after C{lock} has been called and
        returns True.

    @ivar locked: Indicates whether the lock is currently held by this
        object.
    """
    # Class-level defaults; lock() overwrites these per instance.
    clean = None
    locked = False

    def __init__(self, name):
        """
        @param name: The path of the symlink/file representing the lock.
        """
        self.name = name
def lock(self):
"""
Acquire this lock.
@rtype: C{bool}
@return: True if the lock is acquired, false otherwise.
@raise: Any exception os.symlink() may raise, other than
EEXIST.
"""
clean = True
while True:
try:
symlink(str(os.getpid()), self.name)
except OSError as e:
if _windows and e.errno in (errno.EACCES, errno.EIO):
# The lock is in the middle of being deleted because we're
# on Windows where lock removal isn't atomic. Give up, we
# don't know how long this is going to take.
return False
if e.errno == errno.EEXIST:
try:
pid = readlink(self.name)
except (IOError, OSError) as e:
if e.errno == errno.ENOENT:
# The lock has vanished, try to claim it in the
# next iteration through the loop.
continue
elif _windows and e.errno == errno.EACCES:
# The lock is in the middle of being
# deleted because we're on Windows where
# lock removal isn't atomic. Give up, we
# don't know how long this is going to
# take.
return False
raise
try:
if kill is not None:
kill(int(pid), 0)
except OSError as e:
if e.errno == errno.ESRCH:
# The owner has vanished, try to claim it in the
# next iteration through the loop.
try:
rmlink(self.name)
except OSError as e:
if e.errno == errno.ENOENT:
# Another process cleaned up the lock.
# Race them to acquire it in the next
# iteration through the loop.
continue
raise
clean = False
continue
raise
return False
raise
self.locked = True
self.clean = clean
return True
def unlock(self):
"""
Release this lock.
This deletes the directory with the given name.
@raise: Any exception os.readlink() may raise, or
ValueError if the lock is not owned by this process.
"""
pid = readlink(self.name)
if int(pid) != os.getpid():
raise ValueError(
"Lock %r not owned by this process" % (self.name,))
rmlink(self.name)
self.locked = False
def isLocked(name):
    """
    Determine if the lock of the given name is held or not.

    @type name: C{str}
    @param name: The filesystem path to the lock to test

    @rtype: C{bool}
    @return: True if the lock is held, False otherwise.
    """
    probe = FilesystemLock(name)
    acquired = False
    try:
        acquired = probe.lock()
    finally:
        # Only release if we actually managed to acquire; otherwise the
        # lock belongs to someone else (or lock() raised).
        if acquired:
            probe.unlock()
    return not acquired
__all__ = ['FilesystemLock', 'isLocked']
| mit |
Kurpilyansky/street-agitation-telegram-bot | street_agitation_bot/management/regions.py | 1 | 1552 | arr = ["Архангельск", "Астрахань", "Барнаул", "Белгород", "Березники*", "Бийск", "Братск", "Брянск", "Великий Новгород", "Владивосток", "Владимир", "Волгоград", "Вологда", "Воркута*", "Воронеж", "Выкса*", "Горно-Алтайск*", "Екатеринбург", "Ессентуки*", "Жуковский*", "Иваново", "Ижевск", "Иркутск", "Йошкар-Ола", "Казань", "Калининград", "Калуга", "Кемерово", "Киров", "Комсомольск-на-Амуре", "Королев*", "Кострома", "Краснодар", "Красноярск", "Курган", "Курск", "Магадан*", "Москва", "Мурманск", "Нижний Новгород", "Новокузнецк", "Новосибирск", "Омск", "Орел", "Оренбург", "Орск", "Пенза", "Пермь", "Псков", "Ростов-на-Дону", "Рязань", "Самара", "Санкт-Петербург", "Саранск", "Саратов", "Смоленск", "Сочи", "Ставрополь", "Сыктывкар", "Тамбов", "Тверь", "Тольятти", "Томск", "Тула", "Тюмень", "Улан-Удэ", "Ульяновск", "Уфа", "Хабаровск", "Чебоксары", "Челябинск", "Череповец", "Чита", "Ярославль"]
# Chat id and interval applied uniformly to every region row.
# NOTE(review): -1001147651737 looks like a Telegram supergroup chat id and
# 10800 s == 3 hours -- confirm against the importer that consumes this TSV.
CHAT_ID = -1001147651737
INTERVAL_SECONDS = 10800

for name in arr:
    # Emit one tab-separated row per region: name, chat id, interval.
    print('\t'.join(map(str, [name, CHAT_ID, INTERVAL_SECONDS])))
| gpl-3.0 |
toontownfunserver/Panda3D-1.9.0 | python/Lib/encodings/koi8_u.py | 593 | 14018 | """ Python Character Mapping Codec koi8_u generated from 'python-mappings/KOI8-U.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless KOI8-U <-> Unicode codec backed by the module-level charmap
    # tables (encoding_table / decoding_table).
    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so each chunk is encoded independently;
    # only the bytes (index [0]) of the charmap result are returned.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so each chunk is decoded independently;
    # only the text (index [0]) of the charmap result is returned.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # All behaviour is inherited; the class only combines the two bases.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # All behaviour is inherited; the class only combines the two bases.
    pass
### encodings module API
def getregentry():
    # Registration hook used by the ``encodings`` package: returns the
    # CodecInfo record under which this codec is looked up as 'koi8-u'.
    return codecs.CodecInfo(
        name='koi8-u',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u2500' # 0x80 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u2502' # 0x81 -> BOX DRAWINGS LIGHT VERTICAL
u'\u250c' # 0x82 -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2510' # 0x83 -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x84 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2518' # 0x85 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u251c' # 0x86 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2524' # 0x87 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\u252c' # 0x88 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u2534' # 0x89 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u253c' # 0x8A -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u2580' # 0x8B -> UPPER HALF BLOCK
u'\u2584' # 0x8C -> LOWER HALF BLOCK
u'\u2588' # 0x8D -> FULL BLOCK
u'\u258c' # 0x8E -> LEFT HALF BLOCK
u'\u2590' # 0x8F -> RIGHT HALF BLOCK
u'\u2591' # 0x90 -> LIGHT SHADE
u'\u2592' # 0x91 -> MEDIUM SHADE
u'\u2593' # 0x92 -> DARK SHADE
u'\u2320' # 0x93 -> TOP HALF INTEGRAL
u'\u25a0' # 0x94 -> BLACK SQUARE
u'\u2219' # 0x95 -> BULLET OPERATOR
u'\u221a' # 0x96 -> SQUARE ROOT
u'\u2248' # 0x97 -> ALMOST EQUAL TO
u'\u2264' # 0x98 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0x99 -> GREATER-THAN OR EQUAL TO
u'\xa0' # 0x9A -> NO-BREAK SPACE
u'\u2321' # 0x9B -> BOTTOM HALF INTEGRAL
u'\xb0' # 0x9C -> DEGREE SIGN
u'\xb2' # 0x9D -> SUPERSCRIPT TWO
u'\xb7' # 0x9E -> MIDDLE DOT
u'\xf7' # 0x9F -> DIVISION SIGN
u'\u2550' # 0xA0 -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u2551' # 0xA1 -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2552' # 0xA2 -> BOX DRAWINGS DOWN SINGLE AND RIGHT DOUBLE
u'\u0451' # 0xA3 -> CYRILLIC SMALL LETTER IO
u'\u0454' # 0xA4 -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\u2554' # 0xA5 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u0456' # 0xA6 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0457' # 0xA7 -> CYRILLIC SMALL LETTER YI (UKRAINIAN)
u'\u2557' # 0xA8 -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u2558' # 0xA9 -> BOX DRAWINGS UP SINGLE AND RIGHT DOUBLE
u'\u2559' # 0xAA -> BOX DRAWINGS UP DOUBLE AND RIGHT SINGLE
u'\u255a' # 0xAB -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u255b' # 0xAC -> BOX DRAWINGS UP SINGLE AND LEFT DOUBLE
u'\u0491' # 0xAD -> CYRILLIC SMALL LETTER UKRAINIAN GHE WITH UPTURN
u'\u255d' # 0xAE -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u255e' # 0xAF -> BOX DRAWINGS VERTICAL SINGLE AND RIGHT DOUBLE
u'\u255f' # 0xB0 -> BOX DRAWINGS VERTICAL DOUBLE AND RIGHT SINGLE
u'\u2560' # 0xB1 -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2561' # 0xB2 -> BOX DRAWINGS VERTICAL SINGLE AND LEFT DOUBLE
u'\u0401' # 0xB3 -> CYRILLIC CAPITAL LETTER IO
u'\u0404' # 0xB4 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\u2563' # 0xB5 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u0406' # 0xB6 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0407' # 0xB7 -> CYRILLIC CAPITAL LETTER YI (UKRAINIAN)
u'\u2566' # 0xB8 -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2567' # 0xB9 -> BOX DRAWINGS UP SINGLE AND HORIZONTAL DOUBLE
u'\u2568' # 0xBA -> BOX DRAWINGS UP DOUBLE AND HORIZONTAL SINGLE
u'\u2569' # 0xBB -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u256a' # 0xBC -> BOX DRAWINGS VERTICAL SINGLE AND HORIZONTAL DOUBLE
u'\u0490' # 0xBD -> CYRILLIC CAPITAL LETTER UKRAINIAN GHE WITH UPTURN
u'\u256c' # 0xBE -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa9' # 0xBF -> COPYRIGHT SIGN
u'\u044e' # 0xC0 -> CYRILLIC SMALL LETTER YU
u'\u0430' # 0xC1 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xC2 -> CYRILLIC SMALL LETTER BE
u'\u0446' # 0xC3 -> CYRILLIC SMALL LETTER TSE
u'\u0434' # 0xC4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xC5 -> CYRILLIC SMALL LETTER IE
u'\u0444' # 0xC6 -> CYRILLIC SMALL LETTER EF
u'\u0433' # 0xC7 -> CYRILLIC SMALL LETTER GHE
u'\u0445' # 0xC8 -> CYRILLIC SMALL LETTER HA
u'\u0438' # 0xC9 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xCA -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xCB -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xCC -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xCD -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xCE -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xCF -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xD0 -> CYRILLIC SMALL LETTER PE
u'\u044f' # 0xD1 -> CYRILLIC SMALL LETTER YA
u'\u0440' # 0xD2 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xD3 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xD4 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xD5 -> CYRILLIC SMALL LETTER U
u'\u0436' # 0xD6 -> CYRILLIC SMALL LETTER ZHE
u'\u0432' # 0xD7 -> CYRILLIC SMALL LETTER VE
u'\u044c' # 0xD8 -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044b' # 0xD9 -> CYRILLIC SMALL LETTER YERU
u'\u0437' # 0xDA -> CYRILLIC SMALL LETTER ZE
u'\u0448' # 0xDB -> CYRILLIC SMALL LETTER SHA
u'\u044d' # 0xDC -> CYRILLIC SMALL LETTER E
u'\u0449' # 0xDD -> CYRILLIC SMALL LETTER SHCHA
u'\u0447' # 0xDE -> CYRILLIC SMALL LETTER CHE
u'\u044a' # 0xDF -> CYRILLIC SMALL LETTER HARD SIGN
u'\u042e' # 0xE0 -> CYRILLIC CAPITAL LETTER YU
u'\u0410' # 0xE1 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xE2 -> CYRILLIC CAPITAL LETTER BE
u'\u0426' # 0xE3 -> CYRILLIC CAPITAL LETTER TSE
u'\u0414' # 0xE4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xE5 -> CYRILLIC CAPITAL LETTER IE
u'\u0424' # 0xE6 -> CYRILLIC CAPITAL LETTER EF
u'\u0413' # 0xE7 -> CYRILLIC CAPITAL LETTER GHE
u'\u0425' # 0xE8 -> CYRILLIC CAPITAL LETTER HA
u'\u0418' # 0xE9 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xEA -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xEB -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xEC -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xED -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xEE -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xEF -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xF0 -> CYRILLIC CAPITAL LETTER PE
u'\u042f' # 0xF1 -> CYRILLIC CAPITAL LETTER YA
u'\u0420' # 0xF2 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xF3 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xF4 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xF5 -> CYRILLIC CAPITAL LETTER U
u'\u0416' # 0xF6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0412' # 0xF7 -> CYRILLIC CAPITAL LETTER VE
u'\u042c' # 0xF8 -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042b' # 0xF9 -> CYRILLIC CAPITAL LETTER YERU
u'\u0417' # 0xFA -> CYRILLIC CAPITAL LETTER ZE
u'\u0428' # 0xFB -> CYRILLIC CAPITAL LETTER SHA
u'\u042d' # 0xFC -> CYRILLIC CAPITAL LETTER E
u'\u0429' # 0xFD -> CYRILLIC CAPITAL LETTER SHCHA
u'\u0427' # 0xFE -> CYRILLIC CAPITAL LETTER CHE
u'\u042a' # 0xFF -> CYRILLIC CAPITAL LETTER HARD SIGN
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause |
DougBurke/astropy | astropy/nddata/tests/test_nduncertainty.py | 2 | 8911 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
import numpy as np
from numpy.testing import assert_array_equal
from ..nduncertainty import (StdDevUncertainty, NDUncertainty,
IncompatibleUncertaintiesException,
UnknownUncertainty)
from ..nddata import NDData
from ... import units as u
# Regarding setter tests:
# No need to test setters since the uncertainty is considered immutable after
# creation except of the parent_nddata attribute and this accepts just
# everything.
# Additionally they should be covered by NDData, NDArithmeticMixin which rely
# on it
# Regarding propagate, _convert_uncert, _propagate_* tests:
# They should be covered by NDArithmeticMixin since there is generally no need
# to test them without this mixin.
# Regarding __getitem__ tests:
# Should be covered by NDSlicingMixin.
# Regarding StdDevUncertainty tests:
# This subclass only overrides the methods for propagation so the same
# they should be covered in NDArithmeticMixin.
# Not really fake but the minimum an uncertainty has to override not to be
# abstract.
class FakeUncertainty(NDUncertainty):
    """Minimal concrete ``NDUncertainty`` subclass used only for testing.

    The propagation hooks are no-ops; only ``uncertainty_type`` matters here.
    """

    @property
    def uncertainty_type(self):
        return 'fake'

    def _propagate_add(self, data, final_data):
        pass

    def _propagate_subtract(self, data, final_data):
        pass

    def _propagate_multiply(self, data, final_data):
        pass

    def _propagate_divide(self, data, final_data):
        pass
# Test the fake (added also StdDevUncertainty which should behave identical)
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_list(UncertClass):
    """Lists are converted to ndarray; an optional unit is stored as given."""
    fake_uncert = UncertClass([1, 2, 3])
    assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
    # Copy makes no difference since casting a list to an np.ndarray always
    # makes a copy.
    # But let's give the uncertainty a unit too
    fake_uncert = UncertClass([1, 2, 3], unit=u.adu)
    assert_array_equal(fake_uncert.array, np.array([1, 2, 3]))
    assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_ndarray(UncertClass):
    """ndarrays are copied by default and shared with ``copy=False``."""
    uncert = np.arange(100).reshape(10, 10)
    fake_uncert = UncertClass(uncert)
    # Numpy Arrays are copied by default
    assert_array_equal(fake_uncert.array, uncert)
    assert fake_uncert.array is not uncert
    # Now try it without copy
    fake_uncert = UncertClass(uncert, copy=False)
    assert fake_uncert.array is uncert
    # let's provide a unit
    fake_uncert = UncertClass(uncert, unit=u.adu)
    assert_array_equal(fake_uncert.array, uncert)
    assert fake_uncert.array is not uncert
    assert fake_uncert.unit is u.adu
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_quantity(UncertClass):
    """Quantity input is split into value and unit; an explicit unit wins."""
    uncert = np.arange(10).reshape(2, 5) * u.adu
    fake_uncert = UncertClass(uncert)
    # Numpy Arrays are copied by default
    assert_array_equal(fake_uncert.array, uncert.value)
    assert fake_uncert.array is not uncert.value
    assert fake_uncert.unit is u.adu
    # Try without copy (should not work, quantity.value always returns a copy)
    fake_uncert = UncertClass(uncert, copy=False)
    assert fake_uncert.array is not uncert.value
    assert fake_uncert.unit is u.adu
    # Now try with an explicit unit parameter too
    fake_uncert = UncertClass(uncert, unit=u.m)
    assert_array_equal(fake_uncert.array, uncert.value)  # No conversion done
    assert fake_uncert.array is not uncert.value
    assert fake_uncert.unit is u.m  # It took the explicit one
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_fake(UncertClass):
    """Wrapping an uncertainty of the same class copies or shares its array."""
    uncert = np.arange(5).reshape(5, 1)
    fake_uncert1 = UncertClass(uncert)
    fake_uncert2 = UncertClass(fake_uncert1)
    assert_array_equal(fake_uncert2.array, uncert)
    assert fake_uncert2.array is not uncert
    # Without making copies
    fake_uncert1 = UncertClass(uncert, copy=False)
    fake_uncert2 = UncertClass(fake_uncert1, copy=False)
    assert_array_equal(fake_uncert2.array, fake_uncert1.array)
    assert fake_uncert2.array is fake_uncert1.array
    # With a unit
    uncert = np.arange(5).reshape(5, 1) * u.adu
    fake_uncert1 = UncertClass(uncert)
    fake_uncert2 = UncertClass(fake_uncert1)
    assert_array_equal(fake_uncert2.array, uncert.value)
    assert fake_uncert2.array is not uncert.value
    assert fake_uncert2.unit is u.adu
    # With a unit and an explicit unit-parameter
    fake_uncert2 = UncertClass(fake_uncert1, unit=u.cm)
    assert_array_equal(fake_uncert2.array, uncert.value)
    assert fake_uncert2.array is not uncert.value
    assert fake_uncert2.unit is u.cm
@pytest.mark.parametrize(('UncertClass'), [FakeUncertainty, StdDevUncertainty,
                                           UnknownUncertainty])
def test_init_fake_with_somethingElse(UncertClass):
    """Arbitrary objects (e.g. dicts) are stored as-is, even with a unit."""
    # What about a dict?
    uncert = {'rdnoise': 2.9, 'gain': 0.6}
    fake_uncert = UncertClass(uncert)
    assert fake_uncert.array == uncert
    # We can pass a unit too but since we cannot do uncertainty propagation
    # the interpretation is up to the user
    fake_uncert = UncertClass(uncert, unit=u.s)
    assert fake_uncert.array == uncert
    assert fake_uncert.unit is u.s
    # So, now check what happens if copy is False
    fake_uncert = UncertClass(uncert, copy=False)
    assert fake_uncert.array == uncert
    assert id(fake_uncert) != id(uncert)
    # dicts cannot be referenced without copy
    # TODO : Find something that can be referenced without copy :-)
def test_init_fake_with_StdDevUncertainty():
    """Converting between unrelated uncertainty classes must fail."""
    # Different instances of uncertainties are not directly convertible so this
    # should fail
    uncert = np.arange(5).reshape(5, 1)
    std_uncert = StdDevUncertainty(uncert)
    with pytest.raises(IncompatibleUncertaintiesException):
        FakeUncertainty(std_uncert)
    # Ok try it the other way around
    fake_uncert = FakeUncertainty(uncert)
    with pytest.raises(IncompatibleUncertaintiesException):
        StdDevUncertainty(fake_uncert)
def test_uncertainty_type():
    """Each class reports its own ``uncertainty_type`` string."""
    fake_uncert = FakeUncertainty([10, 2])
    assert fake_uncert.uncertainty_type == 'fake'
    std_uncert = StdDevUncertainty([10, 2])
    assert std_uncert.uncertainty_type == 'std'
def test_uncertainty_correlated():
    """Only ``StdDevUncertainty`` advertises correlated propagation support."""
    fake_uncert = FakeUncertainty([10, 2])
    assert not fake_uncert.supports_correlated
    std_uncert = StdDevUncertainty([10, 2])
    assert std_uncert.supports_correlated
def test_for_leak_with_uncertainty():
    # Regression test for memory leak because of cyclic references between
    # NDData and uncertainty
    from collections import defaultdict
    from gc import get_objects

    def test_leak(func, specific_objects=None):
        """Function based on gc.get_objects to determine if any object or
        a specific object leaks.

        It requires a function to be given and if any objects survive the
        function scope it's considered a leak (so don't return anything).
        """
        # Count live objects per type before and after running func; any
        # growth is interpreted as a leak.
        before = defaultdict(int)
        for i in get_objects():
            before[type(i)] += 1

        func()

        after = defaultdict(int)
        for i in get_objects():
            after[type(i)] += 1

        if specific_objects is None:
            assert all(after[k] - before[k] == 0 for k in after)
        else:
            assert after[specific_objects] - before[specific_objects] == 0

    def non_leaker_nddata():
        # Without uncertainty there is no reason to assume that there is a
        # memory leak but test it nevertheless.
        NDData(np.ones(100))

    def leaker_nddata():
        # With uncertainty there was a memory leak!
        NDData(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))

    test_leak(non_leaker_nddata, NDData)
    test_leak(leaker_nddata, NDData)

    # Same for NDDataArray:
    from ..compat import NDDataArray

    def non_leaker_nddataarray():
        NDDataArray(np.ones(100))

    def leaker_nddataarray():
        NDDataArray(np.ones(100), uncertainty=StdDevUncertainty(np.ones(100)))

    test_leak(non_leaker_nddataarray, NDDataArray)
    test_leak(leaker_nddataarray, NDDataArray)
def test_for_stolen_uncertainty():
    """Sharing an uncertainty must not rebind its ``parent_nddata``."""
    # Sharing uncertainties should not overwrite the parent_nddata attribute
    ndd1 = NDData(1, uncertainty=1)
    ndd2 = NDData(2, uncertainty=ndd1.uncertainty)
    # uncertainty.parent_nddata.data should be the original data!
    assert ndd1.uncertainty.parent_nddata.data == ndd1.data
| bsd-3-clause |
hj3938/or-tools | examples/tests/issue5.py | 39 | 5719 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
A programming puzzle from Einav in Google CP Solver.
From
'A programming puzzle from Einav'
http://gcanyon.wordpress.com/2009/10/28/a-programming-puzzle-from-einav/
My friend Einav gave me this programming puzzle to work on. Given
this array of positive and negative numbers:
33 30 -10 -6 18 7 -11 -23 6
...
-25 4 16 30 33 -23 -4 4 -23
You can flip the sign of entire rows and columns, as many of them
as you like. The goal is to make all the rows and columns sum to positive
numbers (or zero), and then to find the solution (there are more than one)
that has the smallest overall sum. So for example, for this array:
33 30 -10
-16 19 9
-17 -12 -14
You could flip the sign for the bottom row to get this array:
33 30 -10
-16 19 9
17 12 14
Now all the rows and columns have positive sums, and the overall total is
108.
But you could instead flip the second and third columns, and the second
row, to get this array:
33 -30 10
16 19 9
-17 12 14
All the rows and columns still total positive, and the overall sum is just
66. So this solution is better (I don't know if it's the best)
A pure brute force solution would have to try over 30 billion solutions.
I wrote code to solve this in J. I'll post that separately.
Compare with the following models:
* MiniZinc http://www.hakank.org/minizinc/einav_puzzle.mzn
* SICStus: http://hakank.org/sicstus/einav_puzzle.pl
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
'''
from ortools.constraint_solver import pywrapcp
def main():
  """Build and solve Einav's sign-flipping puzzle with the CP solver.

  One +1/-1 sign variable is chosen per row and per column; every row sum
  and column sum must be non-negative and the grand total is minimized.
  Each improving solution found during search is printed.
  """
  # Create the solver.
  solver = pywrapcp.Solver('Einav puzzle')

  #
  # data
  #

  # small problem
  # data = [
  #     [ 33, 30, -10],
  #     [-16, 19, 9],
  #     [-17, -12, -14]
  # ]

  data = [[33, 30, 10, -6, 18, -7, -11, 23, -6],
          [16, -19, 9, -26, -8, -19, -8, -21, -14],
          [17, 12, -14, 31, -30, 13, -13, 19, 16],
          [-6, -11, 1, 17, -12, -4, -7, 14, -21],
          [18, -31, 34, -22, 17, -19, 20, 24, 6],
          [33, -18, 17, -15, 31, -5, 3, 27, -3],
          [-18, -20, -18, 31, 6, 4, -2, -12, 24],
          [27, 14, 4, -29, -3, 5, -29, 8, -12],
          [-15, -7, -23, 23, -9, -8, 6, 8, -12],
          [33, -23, -19, -4, -8, -7, 11, -12, 31],
          [-20, 19, -15, -30, 11, 32, 7, 14, -5],
          [-23, 18, -32, -2, -31, -7, 8, 24, 16],
          [32, -4, -10, -14, -6, -1, 0, 23, 23],
          [25, 0, -23, 22, 12, 28, -27, 15, 4],
          [-30, -13, -16, -3, -3, -32, -3, 27, -31],
          [22, 1, 26, 4, -2, -13, 26, 17, 14],
          [-9, -18, 3, -20, -27, -32, -11, 27, 13],
          [-17, 33, -7, 19, -32, 13, -31, -2, -24],
          [-31, 27, -31, -29, 15, 2, 29, -15, 33],
          [-18, -23, 15, 28, 0, 30, -4, 12, -32],
          [-3, 34, 27, -25, -18, 26, 1, 34, 26],
          [-21, -31, -10, -13, -30, -17, -12, -26, 31],
          [23, -31, -19, 21, -17, -10, 2, -23, 23],
          [-3, 6, 0, -3, -32, 0, -10, -25, 14],
          [-19, 9, 14, -27, 20, 15, -5, -27, 18],
          [11, -6, 24, 7, -17, 26, 20, -31, -25],
          [-25, 4, -16, 30, 33, 23, -4, -4, 23]]

  rows = len(data)
  cols = len(data[0])

  #
  # variables
  #
  # x[i, j] holds the matrix entry after the chosen sign flips are applied.
  x = {}
  for i in range(rows):
    for j in range(cols):
      x[i, j] = solver.IntVar(-100, 100, 'x[%i,%i]' % (i, j))

  # One sign (+1 or -1) per row and per column.
  row_signs = [solver.IntVar([-1, 1], 'row_signs(%i)' % i)
               for i in range(rows)]
  col_signs = [solver.IntVar([-1, 1], 'col_signs(%i)' % j)
               for j in range(cols)]

  #
  # constraints
  #
  for i in range(rows):
    for j in range(cols):
      solver.Add(x[i, j] == data[i][j] * row_signs[i] * col_signs[j])

  total_sum = solver.Sum([x[i, j] for i in range(rows) for j in range(cols)])

  # row sums
  row_sums = [solver.Sum([x[i, j] for j in range(cols)]).Var()
              for i in range(rows)]
  # >= 0
  for i in range(rows):
    row_sums[i].SetMin(0)

  # column sums
  col_sums = [solver.Sum([x[i, j] for i in range(rows)]).Var()
              for j in range(cols)]
  for j in range(cols):
    col_sums[j].SetMin(0)

  # objective
  objective = solver.Minimize(total_sum, 1)

  #
  # search and result
  #
  # Branch on the sign variables only; the x values follow by propagation.
  db = solver.Phase(col_signs + row_signs,
                    solver.CHOOSE_FIRST_UNBOUND,
                    solver.ASSIGN_MIN_VALUE)

  search_log = solver.SearchLog(100000, total_sum)
  solver.NewSearch(db, [objective, search_log])

  num_solutions = 0
  while solver.NextSolution():
    num_solutions += 1
    print 'Sum =', objective.best()
    print 'row_sums:', [row_sums[i].Value() for i in range(rows)]
    print 'col_sums:', [col_sums[j].Value() for j in range(cols)]
    for i in range(rows):
      for j in range(cols):
        print x[i, j].Value(),
      print
    print

  solver.EndSearch()
  print
  print 'num_solutions:', num_solutions
  print 'failures:', solver.failures()
  print 'branches:', solver.branches()
  print 'wall_time:', solver.wall_time(), 'ms'
# Run the model only when executed as a script.
if __name__ == '__main__':
  main()
| apache-2.0 |
dylanlesko/youtube-dl | youtube_dl/extractor/beatportpro.py | 142 | 3423 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import int_or_none
class BeatportProIE(InfoExtractor):
    """Extract audio/video preview streams for tracks on pro.beatport.com."""
    _VALID_URL = r'https?://pro\.beatport\.com/track/(?P<display_id>[^/]+)/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://pro.beatport.com/track/synesthesia-original-mix/5379371',
        'md5': 'b3c34d8639a2f6a7f734382358478887',
        'info_dict': {
            'id': '5379371',
            'display_id': 'synesthesia-original-mix',
            'ext': 'mp4',
            'title': 'Froxic - Synesthesia (Original Mix)',
        },
    }, {
        'url': 'https://pro.beatport.com/track/love-and-war-original-mix/3756896',
        'md5': 'e44c3025dfa38c6577fbaeb43da43514',
        'info_dict': {
            'id': '3756896',
            'display_id': 'love-and-war-original-mix',
            'ext': 'mp3',
            'title': 'Wolfgang Gartner - Love & War (Original Mix)',
        },
    }, {
        'url': 'https://pro.beatport.com/track/birds-original-mix/4991738',
        'md5': 'a1fd8e8046de3950fd039304c186c05f',
        'info_dict': {
            'id': '4991738',
            'display_id': 'birds-original-mix',
            'ext': 'mp4',
            'title': "Tos, Middle Milk, Mumblin' Johnsson - Birds (Original Mix)",
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        track_id = mobj.group('id')
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

        # The page embeds all playable tracks as a JSON object assigned to
        # window.Playables; parse it and select the entry whose id matches
        # the numeric track id from the URL.
        playables = self._parse_json(
            self._search_regex(
                r'window\.Playables\s*=\s*({.+?});', webpage,
                'playables info', flags=re.DOTALL),
            track_id)

        track = next(t for t in playables['tracks'] if t['id'] == int(track_id))

        # Title is "<artists> - <name>", with the mix name appended if set.
        title = ', '.join((a['name'] for a in track['artists'])) + ' - ' + track['name']
        if track['mix']:
            title += ' (' + track['mix'] + ')'

        formats = []
        for ext, info in track['preview'].items():
            if not info['url']:
                continue
            fmt = {
                'url': info['url'],
                'ext': ext,
                'format_id': ext,
                'vcodec': 'none',
            }
            # Hard-coded stream parameters for the two preview containers;
            # the mp4 (AAC) preview gets the higher preference.
            if ext == 'mp3':
                fmt['preference'] = 0
                fmt['acodec'] = 'mp3'
                fmt['abr'] = 96
                fmt['asr'] = 44100
            elif ext == 'mp4':
                fmt['preference'] = 1
                fmt['acodec'] = 'aac'
                fmt['abr'] = 96
                fmt['asr'] = 44100
            formats.append(fmt)
        self._sort_formats(formats)

        images = []
        for name, info in track['images'].items():
            image_url = info.get('url')
            # 'dynamic' entries are skipped (presumably templated, not
            # concrete image URLs) -- confirm against live site data.
            if name == 'dynamic' or not image_url:
                continue
            image = {
                'id': name,
                'url': image_url,
                'height': int_or_none(info.get('height')),
                'width': int_or_none(info.get('width')),
            }
            images.append(image)

        return {
            'id': compat_str(track.get('id')) or track_id,
            'display_id': track.get('slug') or display_id,
            'title': title,
            'formats': formats,
            'thumbnails': images,
        }
| unlicense |
lorenzo-desantis/mne-python | mne/io/edf/edf.py | 5 | 25772 | """Conversion tool from EDF, EDF+, BDF to FIF
"""
# Authors: Teon Brooks <teon.brooks@gmail.com>
# Martin Billinger <martin.billinger@tugraz.at>
#
# License: BSD (3-clause)
import os
import calendar
import datetime
import re
import warnings
from math import ceil, floor
import numpy as np
from ...utils import verbose, logger
from ..base import _BaseRaw, _check_update_montage
from ..meas_info import _empty_info
from ..pick import pick_types
from ..constants import FIFF
from ...filter import resample
from ...externals.six.moves import zip
class RawEDF(_BaseRaw):
    """Raw object from EDF, EDF+, BDF file
    Parameters
    ----------
    input_fname : str
        Path to the EDF+,BDF file.
    montage : str | None | instance of Montage
        Path or instance of montage containing electrode positions.
        If None, sensor locations are (0,0,0). See the documentation of
        :func:`mne.channels.read_montage` for more information.
    eog : list or tuple
        Names of channels or list of indices that should be designated
        EOG channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    misc : list or tuple
        Names of channels or list of indices that should be designated
        MISC channels. Values should correspond to the electrodes in the
        edf file. Default is None.
    stim_channel : str | int | None
        The channel name or channel index (starting at 0).
        -1 corresponds to the last channel (default).
        If None, there will be no stim channel added.
    annot : str | None
        Path to annotation file.
        If None, no derived stim channel will be added (for files requiring
        annotation file to interpret stim channel).
    annotmap : str | None
        Path to annotation map file containing mapping from label to trigger.
        Must be specified if annot is not None.
    preload : bool or str (default False)
        Preload data into memory for data manipulation and faster indexing.
        If True, the data will be preloaded into memory (fast, requires
        large amount of memory). If preload is a string, preload is the
        file name of a memory-mapped file which is used to store the data
        on the hard drive (slower, requires less memory).
    verbose : bool, str, int, or None
        If not None, override default verbose level (see mne.verbose).
    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    @verbose
    def __init__(self, input_fname, montage, eog=None, misc=None,
                 stim_channel=-1, annot=None, annotmap=None,
                 preload=False, verbose=None):
        logger.info('Extracting edf Parameters from %s...' % input_fname)
        input_fname = os.path.abspath(input_fname)
        # Parse the EDF/BDF header into an mne Info dict plus a dict of
        # format-specific bookkeeping used later by _read_segment_file.
        info, edf_info = _get_edf_info(input_fname, stim_channel,
                                       annot, annotmap,
                                       eog, misc, preload)
        logger.info('Creating Raw.info structure...')
        _check_update_montage(info, montage)
        # annot and annotmap only make sense together.
        if bool(annot) != bool(annotmap):
            warnings.warn(("Stimulus Channel will not be annotated. "
                           "Both 'annot' and 'annotmap' must be specified."))
        # Raw attributes
        last_samps = [edf_info['nsamples'] - 1]
        super(RawEDF, self).__init__(
            info, preload, filenames=[input_fname], raw_extras=[edf_info],
            last_samps=last_samps, orig_format='int',
            verbose=verbose)
        logger.info('Ready.')
    @verbose
    def _read_segment_file(self, data, idx, offset, fi, start, stop,
                           cals, mult):
        """Read a chunk of raw data.

        Fills ``data[:, offset:offset + (stop - start)]`` in place with
        calibrated samples for the channels selected by ``idx``.
        """
        from scipy.interpolate import interp1d
        if mult is not None:
            # XXX "cals" here does not function the same way as in RawFIF,
            # and for efficiency we want to be able to combine mult and cals
            # so proj support will have to wait until this is resolved
            raise NotImplementedError('mult is not supported yet')
        # RawFIF and RawEDF think of "stop" differently, easiest to increment
        # here and refactor later
        stop += 1
        sel = np.arange(self.info['nchan'])[idx]
        n_samps = self._raw_extras[fi]['n_samps']
        buf_len = self._raw_extras[fi]['max_samp']
        sfreq = self.info['sfreq']
        n_chan = self.info['nchan']
        data_size = self._raw_extras[fi]['data_size']
        data_offset = self._raw_extras[fi]['data_offset']
        stim_channel = self._raw_extras[fi]['stim_channel']
        tal_channel = self._raw_extras[fi]['tal_channel']
        annot = self._raw_extras[fi]['annot']
        annotmap = self._raw_extras[fi]['annotmap']
        subtype = self._raw_extras[fi]['subtype']
        # this is used to deal with indexing in the middle of a sampling period
        blockstart = int(floor(float(start) / buf_len) * buf_len)
        blockstop = int(ceil(float(stop) / buf_len) * buf_len)
        # gain constructor
        physical_range = np.array([ch['range'] for ch in self.info['chs']])
        cal = np.array([ch['cal'] for ch in self.info['chs']])
        gains = np.atleast_2d(self._raw_extras[fi]['units'] *
                              (physical_range / cal))
        # physical dimension in uV
        physical_min = np.atleast_2d(self._raw_extras[fi]['units'] *
                                     self._raw_extras[fi]['physical_min'])
        digital_min = self._raw_extras[fi]['digital_min']
        offsets = np.atleast_2d(physical_min - (digital_min * gains)).T
        if tal_channel is not None:
            # TAL channel carries encoded text, not samples -- no offset.
            offsets[tal_channel] = 0
        read_size = blockstop - blockstart
        this_data = np.empty((len(sel), buf_len))
        data = data[:, offset:offset + (stop - start)]
        """
        Consider this example:
        tmin, tmax = (2, 27)
        read_size = 30
        buf_len = 10
        sfreq = 1.
        +---------+---------+---------+
        File structure: | buf0 | buf1 | buf2 |
        +---------+---------+---------+
        File time: 0 10 20 30
        +---------+---------+---------+
        Requested time: 2 27
        | |
        blockstart blockstop
        | |
        start stop
        We need 27 - 2 = 25 samples (per channel) to store our data, and
        we need to read from 3 buffers (30 samples) to get all of our data.
        On all reads but the first, the data we read starts at
        the first sample of the buffer. On all reads but the last,
        the data we read ends on the last sample of the buffer.
        We call this_data the variable that stores the current buffer's data,
        and data the variable that stores the total output.
        On the first read, we need to do this::
            >>> data[0:buf_len-2] = this_data[2:buf_len]
        On the second read, we need to do::
            >>> data[1*buf_len-2:2*buf_len-2] = this_data[0:buf_len]
        On the final read, we need to do::
            >>> data[2*buf_len-2:3*buf_len-2-3] = this_data[0:buf_len-3]
        """
        with open(self._filenames[fi], 'rb', buffering=0) as fid:
            # extract data
            fid.seek(data_offset + blockstart * n_chan * data_size)
            n_blk = int(ceil(float(read_size) / buf_len))
            start_offset = start - blockstart
            end_offset = blockstop - stop
            for bi in range(n_blk):
                # Triage start (sidx) and end (eidx) indices for
                # data (d) and read (r)
                if bi == 0:
                    d_sidx = 0
                    r_sidx = start_offset
                else:
                    d_sidx = bi * buf_len - start_offset
                    r_sidx = 0
                if bi == n_blk - 1:
                    d_eidx = data.shape[1]
                    r_eidx = buf_len - end_offset
                else:
                    d_eidx = (bi + 1) * buf_len - start_offset
                    r_eidx = buf_len
                n_buf_samp = r_eidx - r_sidx
                count = 0
                for j, samp in enumerate(n_samps):
                    # bdf data: 24bit data
                    if j not in sel:
                        # Skip over unselected channels without reading.
                        fid.seek(samp * data_size, 1)
                        continue
                    if samp == buf_len:
                        # use faster version with skips built in
                        if r_sidx > 0:
                            fid.seek(r_sidx * data_size, 1)
                        ch_data = _read_ch(fid, subtype, n_buf_samp, data_size)
                        if r_eidx < buf_len:
                            fid.seek((buf_len - r_eidx) * data_size, 1)
                    else:
                        # read in all the data and triage appropriately
                        ch_data = _read_ch(fid, subtype, samp, data_size)
                        if j == tal_channel:
                            # don't resample tal_channel,
                            # pad with zeros instead.
                            n_missing = int(buf_len - samp)
                            ch_data = np.hstack([ch_data, [0] * n_missing])
                            ch_data = ch_data[r_sidx:r_eidx]
                        elif j == stim_channel:
                            if annot and annotmap or \
                                    tal_channel is not None:
                                # don't bother with resampling the stim ch
                                # because it gets overwritten later on.
                                ch_data = np.zeros(n_buf_samp)
                            else:
                                warnings.warn('Interpolating stim channel.'
                                              ' Events may jitter.')
                                oldrange = np.linspace(0, 1, samp + 1, True)
                                newrange = np.linspace(0, 1, buf_len, False)
                                newrange = newrange[r_sidx:r_eidx]
                                # zero-order hold keeps trigger values exact
                                ch_data = interp1d(
                                    oldrange, np.append(ch_data, 0),
                                    kind='zero')(newrange)
                        else:
                            ch_data = resample(ch_data, buf_len, samp,
                                               npad=0)[r_sidx:r_eidx]
                    this_data[count, :n_buf_samp] = ch_data
                    count += 1
                data[:, d_sidx:d_eidx] = this_data[:, :n_buf_samp]
        # Convert digital values to physical units (in place).
        data *= gains.T[sel]
        data += offsets[sel]
        # only try to read the stim channel if it's not None and it's
        # actually one of the requested channels
        if stim_channel is not None and (sel == stim_channel).sum() > 0:
            stim_channel_idx = np.where(sel == stim_channel)[0]
            if annot and annotmap:
                evts = _read_annot(annot, annotmap, sfreq,
                                   self._last_samps[fi])
                data[stim_channel_idx, :] = evts[start:stop]
            elif tal_channel is not None:
                tal_channel_idx = np.where(sel == tal_channel)[0][0]
                evts = _parse_tal_channel(data[tal_channel_idx])
                self._raw_extras[fi]['events'] = evts
                # Map each distinct annotation string to a trigger id >= 1.
                unique_annots = sorted(set([e[2] for e in evts]))
                mapping = dict((a, n + 1) for n, a in enumerate(unique_annots))
                stim = np.zeros(read_size)
                for t_start, t_duration, annotation in evts:
                    evid = mapping[annotation]
                    n_start = int(t_start * sfreq)
                    n_stop = int(t_duration * sfreq) + n_start - 1
                    # make sure events without duration get one sample
                    n_stop = n_stop if n_stop > n_start else n_start + 1
                    if any(stim[n_start:n_stop]):
                        raise NotImplementedError('EDF+ with overlapping '
                                                  'events not supported.')
                    stim[n_start:n_stop] = evid
                data[stim_channel_idx, :] = stim[start:stop]
            else:
                # Allows support for up to 16-bit trigger values (2 ** 16 - 1)
                stim = np.bitwise_and(data[stim_channel_idx].astype(int),
                                      65535)
                data[stim_channel_idx, :] = stim
def _read_ch(fid, subtype, samp, data_size):
"""Helper to read a number of samples for a single channel"""
if subtype in ('24BIT', 'bdf'):
ch_data = np.fromfile(fid, dtype=np.uint8,
count=samp * data_size)
ch_data = ch_data.reshape(-1, 3).astype(np.int32)
ch_data = ((ch_data[:, 0]) +
(ch_data[:, 1] << 8) +
(ch_data[:, 2] << 16))
# 24th bit determines the sign
ch_data[ch_data >= (1 << 23)] -= (1 << 24)
# edf data: 16bit data
else:
ch_data = np.fromfile(fid, dtype='<i2', count=samp)
return ch_data
def _parse_tal_channel(tal_channel_data):
"""Parse time-stamped annotation lists (TALs) in stim_channel
and return list of events.
Parameters
----------
tal_channel_data : ndarray, shape = [n_samples]
channel data in EDF+ TAL format
Returns
-------
events : list
List of events. Each event contains [start, duration, annotation].
References
----------
http://www.edfplus.info/specs/edfplus.html#tal
"""
# convert tal_channel to an ascii string
tals = bytearray()
for s in tal_channel_data:
i = int(s)
tals.extend([i % 256, i // 256])
regex_tal = '([+-]\d+\.?\d*)(\x15(\d+\.?\d*))?(\x14.*?)\x14\x00'
tal_list = re.findall(regex_tal, tals.decode('ascii'))
events = []
for ev in tal_list:
onset = float(ev[0])
duration = float(ev[2]) if ev[2] else 0
for annotation in ev[3].split('\x14')[1:]:
if annotation:
events.append([onset, duration, annotation])
return events
def _get_edf_info(fname, stim_channel, annot, annotmap, eog, misc, preload):
    """Extracts all the information from the EDF+,BDF file.

    Reads the fixed-layout ASCII header sequentially and returns a tuple
    ``(info, edf_info)`` where ``info`` is an mne measurement-info dict and
    ``edf_info`` holds format-specific bookkeeping (sample counts, data
    offset, TAL/stim channel indices, calibration minima, ...).
    """
    if eog is None:
        eog = []
    if misc is None:
        misc = []
    info = _empty_info()
    info['filename'] = fname
    edf_info = dict()
    edf_info['annot'] = annot
    edf_info['annotmap'] = annotmap
    edf_info['events'] = []
    # The header is a sequence of fixed-width ASCII fields; the reads below
    # must stay in exactly this order.
    with open(fname, 'rb') as fid:
        assert(fid.tell() == 0)
        fid.seek(8)
        fid.read(80).strip().decode()  # subject id
        fid.read(80).strip().decode()  # recording id
        day, month, year = [int(x) for x in re.findall('(\d+)',
                                                       fid.read(8).decode())]
        hour, minute, sec = [int(x) for x in re.findall('(\d+)',
                                                        fid.read(8).decode())]
        # EDF stores two-digit years; assumes recordings are from 2000+.
        date = datetime.datetime(year + 2000, month, day, hour, minute, sec)
        info['meas_date'] = calendar.timegm(date.utctimetuple())
        edf_info['data_offset'] = header_nbytes = int(fid.read(8).decode())
        subtype = fid.read(44).strip().decode()[:5]
        if len(subtype) > 0:
            edf_info['subtype'] = subtype
        else:
            # Fall back to the file extension when the header field is empty.
            edf_info['subtype'] = os.path.splitext(fname)[1][1:].lower()
        edf_info['n_records'] = n_records = int(fid.read(8).decode())
        # record length in seconds
        record_length = float(fid.read(8).decode())
        if record_length == 0:
            edf_info['record_length'] = record_length = 1.
            warnings.warn('Header information is incorrect for record length. '
                          'Default record length set to 1.')
        else:
            edf_info['record_length'] = record_length
        info['nchan'] = nchan = int(fid.read(4).decode())
        channels = list(range(info['nchan']))
        ch_names = [fid.read(16).strip().decode() for ch in channels]
        for ch in channels:
            fid.read(80)  # transducer
        units = [fid.read(8).strip().decode() for ch in channels]
        # Convert unit labels to multiplicative factors (uV -> volts).
        for i, unit in enumerate(units):
            if unit == 'uV':
                units[i] = 1e-6
            else:
                units[i] = 1
        edf_info['units'] = units
        physical_min = np.array([float(fid.read(8).decode())
                                 for ch in channels])
        edf_info['physical_min'] = physical_min
        physical_max = np.array([float(fid.read(8).decode())
                                 for ch in channels])
        digital_min = np.array([float(fid.read(8).decode())
                                for ch in channels])
        edf_info['digital_min'] = digital_min
        digital_max = np.array([float(fid.read(8).decode())
                                for ch in channels])
        # NOTE(review): the [:-1] drops the last channel's prefiltering field
        # -- presumably because it is the annotation/stim channel; confirm.
        prefiltering = [fid.read(80).strip().decode() for ch in channels][:-1]
        highpass = np.ravel([re.findall('HP:\s+(\w+)', filt)
                             for filt in prefiltering])
        lowpass = np.ravel([re.findall('LP:\s+(\w+)', filt)
                            for filt in prefiltering])
        high_pass_default = 0.
        if highpass.size == 0:
            info['highpass'] = high_pass_default
        elif all(highpass):
            if highpass[0] == 'NaN':
                info['highpass'] = high_pass_default
            elif highpass[0] == 'DC':
                info['highpass'] = 0.
            else:
                info['highpass'] = float(highpass[0])
        else:
            # NOTE(review): entries are strings here, so np.min is a
            # lexicographic minimum (e.g. '10' < '2') before the float cast;
            # also the warning says "Highest" -- confirm intended behavior.
            info['highpass'] = float(np.min(highpass))
            warnings.warn('Channels contain different highpass filters. '
                          'Highest filter setting will be stored.')
        if lowpass.size == 0:
            info['lowpass'] = None
        elif all(lowpass):
            if lowpass[0] == 'NaN':
                info['lowpass'] = None
            else:
                info['lowpass'] = float(lowpass[0])
        else:
            # NOTE(review): same lexicographic-min caveat as highpass above.
            info['lowpass'] = float(np.min(lowpass))
            warnings.warn('%s' % ('Channels contain different lowpass filters.'
                                  ' Lowest filter setting will be stored.'))
        # number of samples per record
        n_samps = np.array([int(fid.read(8).decode()) for ch in channels])
        edf_info['n_samps'] = n_samps
        fid.read(32 * info['nchan']).decode()  # reserved
        # Sanity check: we must have consumed exactly the declared header.
        assert fid.tell() == header_nbytes
    physical_ranges = physical_max - physical_min
    cals = digital_max - digital_min
    # Some keys to be consistent with FIF measurement info
    info['description'] = None
    info['buffer_size_sec'] = 10.
    if edf_info['subtype'] in ('24BIT', 'bdf'):
        edf_info['data_size'] = 3  # 24-bit (3 byte) integers
    else:
        edf_info['data_size'] = 2  # 16-bit (2 byte) integers
    # Creates a list of dicts of eeg channels for raw.info
    logger.info('Setting channel info structure...')
    info['chs'] = []
    info['ch_names'] = ch_names
    # EDF+ annotation (TAL) channel, identified by its reserved name.
    tal_ch_name = 'EDF Annotations'
    if tal_ch_name in ch_names:
        tal_channel = ch_names.index(tal_ch_name)
    else:
        tal_channel = None
    edf_info['tal_channel'] = tal_channel
    if tal_channel is not None and stim_channel is not None and not preload:
        raise RuntimeError('%s' % ('EDF+ Annotations (TAL) channel needs to be'
                                   ' parsed completely on loading.'
                                   ' You must set preload parameter to True.'))
    if stim_channel == -1:
        stim_channel = info['nchan'] - 1
    for idx, ch_info in enumerate(zip(ch_names, physical_ranges, cals)):
        ch_name, physical_range, cal = ch_info
        chan_info = {}
        chan_info['cal'] = cal
        chan_info['logno'] = idx + 1
        chan_info['scanno'] = idx + 1
        chan_info['range'] = physical_range
        chan_info['unit_mul'] = 0.
        chan_info['ch_name'] = ch_name
        chan_info['unit'] = FIFF.FIFF_UNIT_V
        chan_info['coord_frame'] = FIFF.FIFFV_COORD_HEAD
        chan_info['coil_type'] = FIFF.FIFFV_COIL_EEG
        chan_info['kind'] = FIFF.FIFFV_EEG_CH
        chan_info['eeg_loc'] = np.zeros(3)
        chan_info['loc'] = np.zeros(12)
        # eog/misc may contain names, positive indices, or negative indices.
        if ch_name in eog or idx in eog or idx - nchan in eog:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_EOG_CH
        if ch_name in misc or idx in misc or idx - nchan in misc:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
        check1 = stim_channel == ch_name
        check2 = stim_channel == idx
        check3 = info['nchan'] > 1
        stim_check = np.logical_and(np.logical_or(check1, check2), check3)
        if stim_check:
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_STIM_CH
            chan_info['ch_name'] = 'STI 014'
            info['ch_names'][idx] = chan_info['ch_name']
            units[idx] = 1
            # Normalize a name-based stim_channel to its numeric index.
            if isinstance(stim_channel, str):
                stim_channel = idx
        if tal_channel == idx:
            # TAL channel carries text, not voltages: identity calibration.
            chan_info['range'] = 1
            chan_info['cal'] = 1
            chan_info['coil_type'] = FIFF.FIFFV_COIL_NONE
            chan_info['unit'] = FIFF.FIFF_UNIT_NONE
            chan_info['kind'] = FIFF.FIFFV_MISC_CH
        info['chs'].append(chan_info)
    edf_info['stim_channel'] = stim_channel
    # sfreq defined as the max sampling rate of eeg
    picks = pick_types(info, meg=False, eeg=True)
    if len(picks) == 0:
        edf_info['max_samp'] = max_samp = n_samps.max()
    else:
        edf_info['max_samp'] = max_samp = n_samps[picks].max()
    info['sfreq'] = max_samp / record_length
    edf_info['nsamples'] = int(n_records * max_samp)
    if info['lowpass'] is None:
        info['lowpass'] = info['sfreq'] / 2.
    return info, edf_info
def _read_annot(annot, annotmap, sfreq, data_length):
"""Annotation File Reader
Parameters
----------
annot : str
Path to annotation file.
annotmap : str
Path to annotation map file containing mapping from label to trigger.
sfreq : float
Sampling frequency.
data_length : int
Length of the data file.
Returns
-------
stim_channel : ndarray
An array containing stimulus trigger events.
"""
pat = '([+/-]\d+.\d+),(\w+)'
annot = open(annot).read()
triggers = re.findall(pat, annot)
times, values = zip(*triggers)
times = [float(time) * sfreq for time in times]
pat = '(\w+):(\d+)'
annotmap = open(annotmap).read()
mappings = re.findall(pat, annotmap)
maps = {}
for mapping in mappings:
maps[mapping[0]] = mapping[1]
triggers = [int(maps[value]) for value in values]
stim_channel = np.zeros(data_length)
for time, trigger in zip(times, triggers):
stim_channel[time] = trigger
return stim_channel
def read_raw_edf(input_fname, montage=None, eog=None, misc=None,
                 stim_channel=-1, annot=None, annotmap=None,
                 preload=False, verbose=None):
    """Reader function for EDF+, BDF conversion to FIF.

    Parameters
    ----------
    input_fname : str
        Path to the EDF+ or BDF file.
    montage : str | None | instance of Montage
        Path or instance of montage with electrode positions. If None,
        sensor locations are (0,0,0); see :func:`mne.channels.read_montage`.
    eog : list or tuple
        Channel names or indices to mark as EOG. Default is None.
    misc : list or tuple
        Channel names or indices to mark as MISC. Default is None.
    stim_channel : str | int | None
        Stim channel name or index (0-based); -1 means the last channel
        (default). None adds no stim channel.
    annot : str | None
        Path to an annotation file, for files whose stim channel requires
        one. If None, no derived stim channel is added.
    annotmap : str | None
        Path to the label-to-trigger annotation map. Required when `annot`
        is given.
    preload : bool or str (default False)
        If True, preload the data into memory (fast, memory hungry). If a
        string, it names a memory-mapped file used to store the data on
        disk (slower, less memory).
    verbose : bool, str, int, or None
        If not None, override the default verbose level (see mne.verbose).

    Returns
    -------
    raw : Instance of RawEDF
        A Raw object containing EDF data.

    See Also
    --------
    mne.io.Raw : Documentation of attribute and methods.
    """
    # Thin convenience wrapper: all work happens in the RawEDF constructor.
    return RawEDF(input_fname=input_fname, montage=montage, eog=eog,
                  misc=misc, stim_channel=stim_channel, annot=annot,
                  annotmap=annotmap, preload=preload, verbose=verbose)
| bsd-3-clause |
c4sc/arividam | arividam/djangocms_news/views.py | 1 | 2690 | from django.shortcuts import render
from django.http import HttpResponse
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.sites.shortcuts import get_current_site
# Create your views here.
from cms.models import Page
from cms.api import create_page
from postman.api import pm_write
from .models import PromotedNews
import json
from arividam.utils import get_default_site_page_by_slug
from django.contrib.auth import get_user_model
import logging
from django.db import IntegrityError
from django.views.generic import ListView
from django.views.generic.detail import DetailView
User = get_user_model()
logger = logging.getLogger(__name__)
def check_promoted(request, page_id):
    """Return a JSON response saying whether the given CMS page is promoted."""
    page = Page.objects.get(pk=int(page_id))
    payload = {"promoted": PromotedNews.objects.filter(page=page).exists()}
    return HttpResponse(json.dumps(payload), content_type="application/json")
def promote_news(request, page_id):
    """Promote a school-site news page to the default (main) site.

    Records the promotion, and when called from a non-default site copies
    the page's "content" plugin into a new article page under the main
    site's "news" page, publishes it, and messages the "editor" user.
    Responds with JSON ``{"promoted": bool}`` -- False if the page was
    already promoted.
    """
    page_id = int(page_id)
    page = Page.objects.get(pk=page_id)
    current_site = get_current_site(request)
    try:
        # Uniqueness of (page, site) is enforced at the DB level.
        p = PromotedNews.objects.create(page=page, site=current_site)
    except IntegrityError:
        # article has already been promoted
        return HttpResponse(json.dumps({"promoted": False}), content_type="application/json")
    news = get_default_site_page_by_slug("news")
    # School name is the subdomain, e.g. "school" in school.example.org.
    current_school = current_site.domain.split(".")[0]
    site = Site.objects.get(pk=settings.DEFAULT_SITE_ID)
    if site != current_site:
        # Create the mirrored article on the main site, prefixed with the
        # originating school's name.
        article = create_page(u"{}-{}".format(current_school, page.get_title(settings.LANGUAGE_CODE)), 'cms/article.html', settings.LANGUAGE_CODE,
                parent=news, published=False, slug=page.get_slug(settings.LANGUAGE_CODE), site=site)
        ph = article.placeholders.get(slot="content")
        old_ph = page.placeholders.get(slot="content")
        # Copy only the first content plugin of the source page.
        plugin = old_ph.get_plugins("en")[0]
        pc = {}
        plugin.copy_plugin(ph, settings.LANGUAGE_CODE, pc, no_signals=False)
        article.publish(settings.LANGUAGE_CODE)
        admin = User.objects.get(username="admin")
        try:
            # Notify the editor (if that account exists) via postman.
            editor = User.objects.get(username="editor")
            pm_write(sender=admin, recipient=editor, subject="Promoted news", body="A news article has been promoted. You can visit the article at http://www.arividam.in{}?edit&language=en".format(article.get_absolute_url(settings.LANGUAGE_CODE)))
        except User.DoesNotExist:
            pass
        #article.publish(settings.LANGUAGE_CODE)
    return HttpResponse(json.dumps({"promoted": True}), content_type="application/json")
| mit |
lmazuel/azure-sdk-for-python | unreleased/azure-mgmt-machinelearning/azure/mgmt/machinelearning/models/web_service_properties.py | 5 | 8236 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
# AutoRest-generated msrest model (see file header); edit with care, as
# regeneration will overwrite manual changes.
class WebServiceProperties(Model):
    """The set of properties specific to the Azure ML web service resource.
    Variables are only populated by the server, and will be ignored when
    sending a request.
    :param title: The title of the web service.
    :type title: str
    :param description: The description of the web service.
    :type description: str
    :ivar created_on: Read Only: The date and time when the web service was
     created.
    :vartype created_on: datetime
    :ivar modified_on: Read Only: The date and time when the web service was
     last modified.
    :vartype modified_on: datetime
    :ivar provisioning_state: Read Only: The provision state of the web
     service. Valid values are Unknown, Provisioning, Succeeded, and Failed.
     Possible values include: 'Unknown', 'Provisioning', 'Succeeded', 'Failed'
    :vartype provisioning_state: str or :class:`ProvisioningState
     <azure.mgmt.machinelearning.models.ProvisioningState>`
    :param keys: Contains the web service provisioning keys. If you do not
     specify provisioning keys, the Azure Machine Learning system generates
     them for you. Note: The keys are not returned from calls to GET
     operations.
    :type keys: :class:`WebServiceKeys
     <azure.mgmt.machinelearning.models.WebServiceKeys>`
    :param read_only: When set to true, indicates that the web service is
     read-only and can no longer be updated or patched, only removed. Default,
     is false. Note: Once set to true, you cannot change its value.
    :type read_only: bool
    :ivar swagger_location: Read Only: Contains the URI of the swagger spec
     associated with this web service.
    :vartype swagger_location: str
    :param expose_sample_data: When set to true, sample data is included in
     the web service's swagger definition. The default value is true.
    :type expose_sample_data: bool
    :param realtime_configuration: Contains the configuration settings for the
     web service endpoint.
    :type realtime_configuration: :class:`RealtimeConfiguration
     <azure.mgmt.machinelearning.models.RealtimeConfiguration>`
    :param diagnostics: Settings controlling the diagnostics traces collection
     for the web service.
    :type diagnostics: :class:`DiagnosticsConfiguration
     <azure.mgmt.machinelearning.models.DiagnosticsConfiguration>`
    :param storage_account: Specifies the storage account that Azure Machine
     Learning uses to store information about the web service. Only the name of
     the storage account is returned from calls to GET operations. When
     updating the storage account information, you must ensure that all
     necessary assets are available in the new storage account or calls to your
     web service will fail.
    :type storage_account: :class:`StorageAccount
     <azure.mgmt.machinelearning.models.StorageAccount>`
    :param machine_learning_workspace: Specifies the Machine Learning
     workspace containing the experiment that is source for the web service.
    :type machine_learning_workspace: :class:`MachineLearningWorkspace
     <azure.mgmt.machinelearning.models.MachineLearningWorkspace>`
    :param commitment_plan: Contains the commitment plan associated with this
     web service. Set at creation time. Once set, this value cannot be changed.
     Note: The commitment plan is not returned from calls to GET operations.
    :type commitment_plan: :class:`CommitmentPlan
     <azure.mgmt.machinelearning.models.CommitmentPlan>`
    :param input: Contains the Swagger 2.0 schema describing one or more of
     the web service's inputs. For more information, see the Swagger
     specification.
    :type input: :class:`ServiceInputOutputSpecification
     <azure.mgmt.machinelearning.models.ServiceInputOutputSpecification>`
    :param output: Contains the Swagger 2.0 schema describing one or more of
     the web service's outputs. For more information, see the Swagger
     specification.
    :type output: :class:`ServiceInputOutputSpecification
     <azure.mgmt.machinelearning.models.ServiceInputOutputSpecification>`
    :param example_request: Defines sample input data for one or more of the
     service's inputs.
    :type example_request: :class:`ExampleRequest
     <azure.mgmt.machinelearning.models.ExampleRequest>`
    :param assets: Contains user defined properties describing web service
     assets. Properties are expressed as Key/Value pairs.
    :type assets: dict
    :param parameters: The set of global parameters values defined for the web
     service, given as a global parameter name to default value map. If no
     default value is specified, the parameter is considered to be required.
    :type parameters: dict
    :param package_type: Polymorphic Discriminator
    :type package_type: str
    """
    # Server-populated fields are read-only; package_type is the polymorphic
    # discriminator and is therefore required.
    _validation = {
        'created_on': {'readonly': True},
        'modified_on': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'swagger_location': {'readonly': True},
        'package_type': {'required': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'title': {'key': 'title', 'type': 'str'},
        'description': {'key': 'description', 'type': 'str'},
        'created_on': {'key': 'createdOn', 'type': 'iso-8601'},
        'modified_on': {'key': 'modifiedOn', 'type': 'iso-8601'},
        'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
        'keys': {'key': 'keys', 'type': 'WebServiceKeys'},
        'read_only': {'key': 'readOnly', 'type': 'bool'},
        'swagger_location': {'key': 'swaggerLocation', 'type': 'str'},
        'expose_sample_data': {'key': 'exposeSampleData', 'type': 'bool'},
        'realtime_configuration': {'key': 'realtimeConfiguration', 'type': 'RealtimeConfiguration'},
        'diagnostics': {'key': 'diagnostics', 'type': 'DiagnosticsConfiguration'},
        'storage_account': {'key': 'storageAccount', 'type': 'StorageAccount'},
        'machine_learning_workspace': {'key': 'machineLearningWorkspace', 'type': 'MachineLearningWorkspace'},
        'commitment_plan': {'key': 'commitmentPlan', 'type': 'CommitmentPlan'},
        'input': {'key': 'input', 'type': 'ServiceInputOutputSpecification'},
        'output': {'key': 'output', 'type': 'ServiceInputOutputSpecification'},
        'example_request': {'key': 'exampleRequest', 'type': 'ExampleRequest'},
        'assets': {'key': 'assets', 'type': '{AssetItem}'},
        'parameters': {'key': 'parameters', 'type': '{str}'},
        'package_type': {'key': 'packageType', 'type': 'str'},
    }
    # Polymorphic dispatch: packageType == 'Graph' deserializes to the
    # WebServicePropertiesForGraph subclass.
    _subtype_map = {
        'package_type': {'Graph': 'WebServicePropertiesForGraph'}
    }
    def __init__(self, title=None, description=None, keys=None, read_only=None, expose_sample_data=None, realtime_configuration=None, diagnostics=None, storage_account=None, machine_learning_workspace=None, commitment_plan=None, input=None, output=None, example_request=None, assets=None, parameters=None):
        # NOTE(review): super().__init__() is not called here; presumably
        # intentional for this AutoRest generator version -- confirm against
        # the msrest.serialization.Model contract before changing.
        self.title = title
        self.description = description
        # Read-only, server-populated fields start as None.
        self.created_on = None
        self.modified_on = None
        self.provisioning_state = None
        self.keys = keys
        self.read_only = read_only
        self.swagger_location = None
        self.expose_sample_data = expose_sample_data
        self.realtime_configuration = realtime_configuration
        self.diagnostics = diagnostics
        self.storage_account = storage_account
        self.machine_learning_workspace = machine_learning_workspace
        self.commitment_plan = commitment_plan
        self.input = input
        self.output = output
        self.example_request = example_request
        self.assets = assets
        self.parameters = parameters
        # Subclasses set the discriminator to their concrete value.
        self.package_type = None
tjwei/jedi | test/test_cache.py | 13 | 3178 | """
Test all things related to the ``jedi.cache`` module.
"""
import time
import pytest
import jedi
from jedi import settings, cache
from jedi.cache import ParserCacheItem, ParserPickling
ParserPicklingCls = type(ParserPickling)
ParserPickling = ParserPicklingCls()
def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
    """
    ParserPickling should not save old cache when cache_directory is changed.
    See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_
    """
    first_dir = str(tmpdir.mkdir('first'))
    second_dir = str(tmpdir.mkdir('second'))
    first_item = ParserCacheItem('fake parser 1')
    second_item = ParserCacheItem('fake parser 2')
    first_path = 'fake path 1'
    second_path = 'fake path 2'
    # Save into the first cache directory and verify it round-trips.
    monkeypatch.setattr(settings, 'cache_directory', first_dir)
    ParserPickling.save_parser(first_path, first_item)
    assert load_stored_item(ParserPickling, first_path, first_item) == first_item.parser
    # After switching directories, the old entry must not be visible.
    monkeypatch.setattr(settings, 'cache_directory', second_dir)
    ParserPickling.save_parser(second_path, second_item)
    assert load_stored_item(ParserPickling, first_path, first_item) is None
def load_stored_item(cache, path, item):
    """Load `item` stored at `path` in `cache`."""
    # Ask for a change time just before the item's own, so a stored entry
    # is considered up to date.
    change_time = item.change_time - 1
    return cache.load_parser(path, change_time)
@pytest.mark.usefixtures("isolated_jedi_cache")
def test_modulepickling_delete_incompatible_cache():
    """A cache written by one pickling version must be invisible to another."""
    path = 'fake path'
    item = ParserCacheItem('fake parser')
    old_version_cache = ParserPicklingCls()
    old_version_cache.version = 1
    old_version_cache.save_parser(path, item)
    # The same version can read its own entry back.
    assert load_stored_item(old_version_cache, path, item) == item.parser
    # A newer version must treat the old entry as incompatible.
    new_version_cache = ParserPicklingCls()
    new_version_cache.version = 2
    assert load_stored_item(new_version_cache, path, item) is None
# skipif takes `reason=`, not `message=`; modern pytest rejects the unknown
# keyword, which made collecting this module fail.
@pytest.mark.skipif('True', reason='Currently the star import cache is not enabled.')
def test_star_import_cache_duration():
    """Star imports should be re-cached once the validity period expires.

    Currently skipped: the star import cache is disabled.
    """
    new = 0.01
    old, jedi.settings.star_import_cache_validity = \
        jedi.settings.star_import_cache_validity, new
    dct = cache._time_caches['star_import_cache_validity']
    old_dct = dict(dct)
    dct.clear()  # first empty...
    # path needs to be not-None (otherwise caching effects are not visible)
    jedi.Script('', 1, 0, '').completions()
    time.sleep(2 * new)
    jedi.Script('', 1, 0, '').completions()
    # reset values
    jedi.settings.star_import_cache_validity = old
    assert len(dct) == 1
    dct = old_dct
    cache._star_import_cache = {}
def test_cache_call_signatures():
    """
    See github issue #390.
    """
    source = 'str(int())'
    def assert_signature(column, expected_name, path=None):
        sigs = jedi.Script(source, 1, column, path).call_signatures()
        assert sigs[0].name == expected_name
    # Repeated calls must keep returning the correct result.
    for _ in range(3):
        assert_signature(8, 'int')
        assert_signature(4, 'str')
    # Alternating calls with a source_path of 'boo' should still work.
    for _ in range(3):
        assert_signature(8, 'int', 'boo')
        assert_signature(4, 'str', 'boo')
def test_cache_line_split_issues():
    """Should still work even if there's a newline."""
    signatures = jedi.Script('int(\n').call_signatures()
    assert signatures[0].name == 'int'
| mit |
menardorama/ReadyNAS-Add-ons | headphones-1.0.0/debian/headphones/apps/headphones/lib/biplist/__init__.py | 25 | 30025 | """biplist -- a library for reading and writing binary property list files.
Binary Property List (plist) files provide a faster and smaller serialization
format for property lists on OS X. This is a library for generating binary
plists which can be read by OS X, iOS, or other clients.
The API models the plistlib API, and will call through to plistlib when
XML serialization or deserialization is required.
To generate plists with UID values, wrap the values with the Uid object. The
value must be an int.
To generate plists with NSData/CFData values, wrap the values with the
Data object. The value must be a string.
Date values can only be datetime.datetime objects.
The exceptions InvalidPlistException and NotBinaryPlistException may be
thrown to indicate that the data cannot be serialized or deserialized as
a binary plist.
Plist generation example:
from biplist import *
from datetime import datetime
plist = {'aKey':'aValue',
'0':1.322,
'now':datetime.now(),
'list':[1,2,3],
'tuple':('a','b','c')
}
try:
writePlist(plist, "example.plist")
except (InvalidPlistException, NotBinaryPlistException), e:
print "Something bad happened:", e
Plist parsing example:
from biplist import *
try:
plist = readPlist("example.plist")
print plist
except (InvalidPlistException, NotBinaryPlistException), e:
print "Not a plist:", e
"""
import sys
from collections import namedtuple
import datetime
import io
import math
import plistlib
from struct import pack, unpack
from struct import error as struct_error
import sys
import time
# --- Python 2/3 compatibility shims -------------------------------------
# On Python 3 the py2-only builtins `unicode` and `long` don't exist;
# alias them so the rest of the module can use one spelling.
try:
    unicode
    unicodeEmpty = r''
except NameError:
    unicode = str
    unicodeEmpty = ''
try:
    long
except NameError:
    long = int
# Dict iteration helper: iteritems() on py2, items() on py3.
try:
    {}.iteritems
    iteritems = lambda x: x.iteritems()
except AttributeError:
    iteritems = lambda x: x.items()

# Public API of this module.
__all__ = [
    'Uid', 'Data', 'readPlist', 'writePlist', 'readPlistFromString',
    'writePlistToString', 'InvalidPlistException', 'NotBinaryPlistException'
]

# Apple uses Jan 1, 2001 as a base for all plist date/times.
apple_reference_date = datetime.datetime.utcfromtimestamp(978307200)
class Uid(int):
    """An int subclass marking values to be serialized as plist UIDs.

    UIDs are produced/consumed by keyed archiving (NSKeyedArchiver).
    """

    def __repr__(self):
        return "Uid({0:d})".format(int(self))
class Data(bytes):
    """A bytes subclass marking values that should be serialized as binary
    data (NSData/CFData) rather than as a string."""
    pass
class InvalidPlistException(Exception):
    """Raised when the plist is incorrectly formatted (bad markers, short
    trailer, unserializable values, ...)."""
    pass
class NotBinaryPlistException(Exception):
    """Raised when a binary plist was expected but the stream lacks the
    'bplist0' magic; callers fall back to XML parsing."""
    pass
def readPlist(pathOrFile):
    """Parse a plist (binary or XML) from a path or open binary stream.

    Binary plists are handled by PlistReader; anything else is retried
    through the stdlib plistlib XML parser.
    Raises NotBinaryPlistException, InvalidPlistException.
    """
    didOpen = False
    result = None
    if isinstance(pathOrFile, (bytes, unicode)):
        pathOrFile = open(pathOrFile, 'rb')
        didOpen = True
    try:
        reader = PlistReader(pathOrFile)
        result = reader.parse()
    except NotBinaryPlistException as e:
        # Not binary: rewind and retry the same data as an XML plist.
        try:
            pathOrFile.seek(0)
            result = None
            if hasattr(plistlib, 'loads'):
                # Python 3.4+ plistlib API.
                contents = None
                if isinstance(pathOrFile, (bytes, unicode)):
                    # NOTE(review): this branch looks unreachable — when
                    # pathOrFile was a path it was already replaced by an open
                    # file object above.
                    with open(pathOrFile, 'rb') as f:
                        contents = f.read()
                else:
                    contents = pathOrFile.read()
                result = plistlib.loads(contents)
            else:
                result = plistlib.readPlist(pathOrFile)
            # Convert plistlib.Data wrappers into this module's Data type.
            result = wrapDataObject(result, for_binary=True)
        except Exception as e:
            raise InvalidPlistException(e)
    finally:
        if didOpen:
            pathOrFile.close()
    return result
def wrapDataObject(o, for_binary=False):
    """Recursively convert between this module's Data and plistlib.Data.

    With for_binary=False, Data values are wrapped as plistlib.Data — only on
    Pythons whose plistlib lacks native bytes support (< 3.4). With
    for_binary=True, plistlib.Data values become Data. Lists and dicts are
    rewritten in place; tuples are rebuilt.
    """
    if isinstance(o, Data) and not for_binary:
        # Bug fix: the old test `not (v[0] >= 3 and v[1] >= 4)` misreads any
        # future (4, 0)... as "old Python". Tuple comparison is the correct
        # "before 3.4" check.
        if sys.version_info[:2] < (3, 4):
            o = plistlib.Data(o)
    # Bug fix: plistlib.Data was removed in Python 3.9; a bare attribute
    # access here raised AttributeError there. getattr with a bytes fallback
    # keeps the isinstance tuple valid on every version.
    elif isinstance(o, (bytes, getattr(plistlib, 'Data', bytes))) and for_binary:
        if hasattr(o, 'data'):
            o = Data(o.data)
    elif isinstance(o, tuple):
        o = wrapDataObject(list(o), for_binary)
        o = tuple(o)
    elif isinstance(o, list):
        for i in range(len(o)):
            o[i] = wrapDataObject(o[i], for_binary)
    elif isinstance(o, dict):
        for k in o:
            o[k] = wrapDataObject(o[k], for_binary)
    return o
def writePlist(rootObject, pathOrFile, binary=True):
    """Write rootObject as a plist to a path or an open binary stream.

    binary=True emits the binary plist format via PlistWriter; binary=False
    delegates to plistlib's XML serializer (wrapping Data values first).
    """
    if not binary:
        rootObject = wrapDataObject(rootObject, binary)
        if hasattr(plistlib, "dump"):
            # Python 3.4+ plistlib API.
            if isinstance(pathOrFile, (bytes, unicode)):
                with open(pathOrFile, 'wb') as f:
                    return plistlib.dump(rootObject, f)
            else:
                return plistlib.dump(rootObject, pathOrFile)
        else:
            return plistlib.writePlist(rootObject, pathOrFile)
    else:
        didOpen = False
        if isinstance(pathOrFile, (bytes, unicode)):
            pathOrFile = open(pathOrFile, 'wb')
            didOpen = True
        writer = PlistWriter(pathOrFile)
        result = writer.writeRoot(rootObject)
        if didOpen:
            pathOrFile.close()
        return result
def readPlistFromString(data):
    """Parse a plist from an in-memory byte string."""
    stream = io.BytesIO(data)
    return readPlist(stream)
def writePlistToString(rootObject, binary=True):
    """Serialize rootObject to plist bytes and return them.

    binary=True uses PlistWriter; binary=False uses whichever XML dump
    function the running plistlib provides.
    """
    if not binary:
        rootObject = wrapDataObject(rootObject, binary)
        if hasattr(plistlib, "dumps"):
            # Python 3.4+.
            return plistlib.dumps(rootObject)
        elif hasattr(plistlib, "writePlistToBytes"):
            # Python 3.0–3.3.
            return plistlib.writePlistToBytes(rootObject)
        else:
            # Python 2.
            return plistlib.writePlistToString(rootObject)
    else:
        ioObject = io.BytesIO()
        writer = PlistWriter(ioObject)
        writer.writeRoot(rootObject)
        return ioObject.getvalue()
def is_stream_binary_plist(stream):
    """Return True if the seekable stream starts with the binary-plist magic."""
    stream.seek(0)
    magic = stream.read(7)
    return magic == b'bplist0'
# Layout of the fixed 32-byte trailer at the end of every binary plist, and
# the per-category byte tallies the writer uses to size its output.
PlistTrailer = namedtuple('PlistTrailer', 'offsetSize, objectRefSize, offsetCount, topLevelObjectNumber, offsetTableOffset')
PlistByteCounts = namedtuple('PlistByteCounts', 'nullBytes, boolBytes, intBytes, realBytes, dateBytes, dataBytes, stringBytes, uidBytes, arrayBytes, setBytes, dictBytes')
class PlistReader(object):
    """Decodes a binary plist stream into Python objects.

    The whole stream is read into `contents`; the 32-byte trailer is decoded
    to locate the offset table, and objects are parsed by moving a cursor
    (`currentOffset`) to each object's offset and dispatching on its marker
    byte.
    """
    file = None          # input stream
    contents = ''        # raw plist bytes once read
    offsets = None       # byte offset of each object, indexed by object number
    trailer = None       # PlistTrailer decoded from the last 32 bytes
    currentOffset = 0    # read cursor into `contents`

    def __init__(self, fileOrStream):
        """Raises NotBinaryPlistException."""
        self.reset()
        self.file = fileOrStream

    def parse(self):
        return self.readRoot()

    def reset(self):
        # Clear all per-parse state so the reader can be reused.
        self.trailer = None
        self.contents = ''
        self.offsets = []
        self.currentOffset = 0

    def readRoot(self):
        """Parse the stream and return the top-level object."""
        result = None
        self.reset()
        # Get the header, make sure it's a valid file.
        if not is_stream_binary_plist(self.file):
            raise NotBinaryPlistException()
        self.file.seek(0)
        self.contents = self.file.read()
        if len(self.contents) < 32:
            raise InvalidPlistException("File is too short.")
        trailerContents = self.contents[-32:]
        try:
            # Trailer layout: 6 pad bytes, offset size, object-ref size,
            # then three big-endian 64-bit fields.
            self.trailer = PlistTrailer._make(unpack("!xxxxxxBBQQQ", trailerContents))
            offset_size = self.trailer.offsetSize * self.trailer.offsetCount
            offset = self.trailer.offsetTableOffset
            offset_contents = self.contents[offset:offset+offset_size]
            offset_i = 0
            # Decode each fixed-width entry of the offset table.
            while offset_i < self.trailer.offsetCount:
                begin = self.trailer.offsetSize*offset_i
                tmp_contents = offset_contents[begin:begin+self.trailer.offsetSize]
                tmp_sized = self.getSizedInteger(tmp_contents, self.trailer.offsetSize)
                self.offsets.append(tmp_sized)
                offset_i += 1
            self.setCurrentOffsetToObjectNumber(self.trailer.topLevelObjectNumber)
            result = self.readObject()
        except TypeError as e:
            raise InvalidPlistException(e)
        return result

    def setCurrentOffsetToObjectNumber(self, objectNumber):
        # Jump the cursor to the start of the given object.
        self.currentOffset = self.offsets[objectNumber]

    def readObject(self):
        """Read one object at the cursor, dispatching on the marker byte.

        High nibble = type; low nibble = size/extra. A low nibble of 0b1111
        means the real length follows as an integer object.
        """
        result = None
        tmp_byte = self.contents[self.currentOffset:self.currentOffset+1]
        marker_byte = unpack("!B", tmp_byte)[0]
        format = (marker_byte >> 4) & 0x0f
        extra = marker_byte & 0x0f
        self.currentOffset += 1
        def proc_extra(extra):
            if extra == 0b1111:
                #self.currentOffset += 1
                extra = self.readObject()
            return extra
        # bool, null, or fill byte
        if format == 0b0000:
            if extra == 0b0000:
                result = None
            elif extra == 0b1000:
                result = False
            elif extra == 0b1001:
                result = True
            elif extra == 0b1111:
                pass # fill byte
            else:
                raise InvalidPlistException("Invalid object found at offset: %d" % (self.currentOffset - 1))
        # int
        elif format == 0b0001:
            extra = proc_extra(extra)
            # `extra` is the log2 of the byte size.
            result = self.readInteger(pow(2, extra))
        # real
        elif format == 0b0010:
            extra = proc_extra(extra)
            result = self.readReal(extra)
        # date
        elif format == 0b0011 and extra == 0b0011:
            result = self.readDate()
        # data
        elif format == 0b0100:
            extra = proc_extra(extra)
            result = self.readData(extra)
        # ascii string
        elif format == 0b0101:
            extra = proc_extra(extra)
            result = self.readAsciiString(extra)
        # Unicode string
        elif format == 0b0110:
            extra = proc_extra(extra)
            result = self.readUnicode(extra)
        # uid
        elif format == 0b1000:
            result = self.readUid(extra)
        # array
        elif format == 0b1010:
            extra = proc_extra(extra)
            result = self.readArray(extra)
        # set
        elif format == 0b1100:
            extra = proc_extra(extra)
            result = set(self.readArray(extra))
        # dict
        elif format == 0b1101:
            extra = proc_extra(extra)
            result = self.readDict(extra)
        else:
            raise InvalidPlistException("Invalid object found: {format: %s, extra: %s}" % (bin(format), bin(extra)))
        return result

    def readInteger(self, byteSize):
        """Read a byteSize-byte big-endian integer and advance the cursor."""
        result = 0
        original_offset = self.currentOffset
        data = self.contents[self.currentOffset:self.currentOffset + byteSize]
        result = self.getSizedInteger(data, byteSize, as_number=True)
        self.currentOffset = original_offset + byteSize
        return result

    def readReal(self, length):
        """Read a float; `length` is log2 of the byte size (2 -> 4B, 3 -> 8B)."""
        result = 0.0
        to_read = pow(2, length)
        data = self.contents[self.currentOffset:self.currentOffset+to_read]
        if length == 2: # 4 bytes
            result = unpack('>f', data)[0]
        elif length == 3: # 8 bytes
            result = unpack('>d', data)[0]
        else:
            raise InvalidPlistException("Unknown real of length %d bytes" % to_read)
        return result

    def readRefs(self, count):
        """Read `count` object references of trailer.objectRefSize bytes each."""
        refs = []
        i = 0
        while i < count:
            fragment = self.contents[self.currentOffset:self.currentOffset+self.trailer.objectRefSize]
            ref = self.getSizedInteger(fragment, len(fragment))
            refs.append(ref)
            self.currentOffset += self.trailer.objectRefSize
            i += 1
        return refs

    def readArray(self, count):
        """Read an array: `count` refs, then each referenced object."""
        result = []
        values = self.readRefs(count)
        i = 0
        while i < len(values):
            self.setCurrentOffsetToObjectNumber(values[i])
            value = self.readObject()
            result.append(value)
            i += 1
        return result

    def readDict(self, count):
        """Read a dict: `count` key refs, then `count` value refs."""
        result = {}
        keys = self.readRefs(count)
        values = self.readRefs(count)
        i = 0
        while i < len(keys):
            self.setCurrentOffsetToObjectNumber(keys[i])
            key = self.readObject()
            self.setCurrentOffsetToObjectNumber(values[i])
            value = self.readObject()
            result[key] = value
            i += 1
        return result

    def readAsciiString(self, length):
        # NOTE(review): no .decode('ascii') here, so on Python 3 this yields
        # bytes rather than str — confirm callers expect that.
        result = unpack("!%ds" % length, self.contents[self.currentOffset:self.currentOffset+length])[0]
        self.currentOffset += length
        return result

    def readUnicode(self, length):
        # `length` counts UTF-16 code units, i.e. 2 bytes each.
        actual_length = length*2
        data = self.contents[self.currentOffset:self.currentOffset+actual_length]
        # unpack not needed?!! data = unpack(">%ds" % (actual_length), data)[0]
        self.currentOffset += actual_length
        return data.decode('utf_16_be')

    def readDate(self):
        result = unpack(">d", self.contents[self.currentOffset:self.currentOffset+8])[0]
        # Use timedelta to workaround time_t size limitation on 32-bit python.
        result = datetime.timedelta(seconds=result) + apple_reference_date
        self.currentOffset += 8
        return result

    def readData(self, length):
        result = self.contents[self.currentOffset:self.currentOffset+length]
        self.currentOffset += length
        return Data(result)

    def readUid(self, length):
        # The marker's low nibble stores (byte count - 1).
        return Uid(self.readInteger(length+1))

    def getSizedInteger(self, data, byteSize, as_number=False):
        """Numbers of 8 bytes are signed integers when they refer to numbers, but unsigned otherwise."""
        result = 0
        # 1, 2, and 4 byte integers are unsigned
        if byteSize == 1:
            result = unpack('>B', data)[0]
        elif byteSize == 2:
            result = unpack('>H', data)[0]
        elif byteSize == 4:
            result = unpack('>L', data)[0]
        elif byteSize == 8:
            if as_number:
                result = unpack('>q', data)[0]
            else:
                result = unpack('>Q', data)[0]
        elif byteSize <= 16:
            # Handle odd-sized or integers larger than 8 bytes
            # Don't naively go over 16 bytes, in order to prevent infinite loops.
            result = 0
            if hasattr(int, 'from_bytes'):
                result = int.from_bytes(data, 'big')
            else:
                # Python 2 fallback: accumulate big-endian bytes manually.
                for byte in data:
                    result = (result << 8) | unpack('>B', byte)[0]
        else:
            raise InvalidPlistException("Encountered integer longer than 16 bytes.")
        return result
class HashableWrapper(object):
    """Wraps an unhashable container (list/dict/set) so the writer can track
    it by identity in its uniqueness tables."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<HashableWrapper: {0}>".format([self.value])
class BoolWrapper(object):
    """Wraps True/False so booleans are not confused with the ints 1/0 when
    the writer deduplicates objects."""

    def __init__(self, value):
        self.value = value

    def __repr__(self):
        return "<BoolWrapper: {0}>".format(self.value)
class FloatWrapper(object):
    """Interning wrapper for floats: FloatWrapper(x) always yields the same
    object for equal x, letting the writer deduplicate real values."""

    _instances = {}

    def __new__(cls, value):
        # Ensure FloatWrapper(x) for a given float x is always the same object.
        try:
            return cls._instances[value]
        except KeyError:
            instance = object.__new__(cls)
            instance.value = value
            cls._instances[value] = instance
            return instance

    def __repr__(self):
        return "<FloatWrapper: {0}>".format(self.value)
class PlistWriter(object):
    """Serializes a wrapped object graph into binary plist bytes.

    Writing is two-pass: computeOffsets tallies per-category byte counts
    (so reference integers can be sized), then writeObject emits the header,
    objects, offset table, and the 32-byte trailer.
    """
    header = b'bplist00bybiplist1.0'
    file = None                # output stream
    byteCounts = None          # PlistByteCounts from the sizing pass
    trailer = None             # PlistTrailer being accumulated
    computedUniques = None     # objects whose sizes have been counted
    writtenReferences = None   # obj -> reference number, in write order
    referencePositions = None  # obj -> byte offset where it was written
    wrappedTrue = None
    wrappedFalse = None

    def __init__(self, file):
        self.reset()
        self.file = file
        # Shared singletons so True/False each serialize as one object.
        self.wrappedTrue = BoolWrapper(True)
        self.wrappedFalse = BoolWrapper(False)

    def reset(self):
        self.byteCounts = PlistByteCounts(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
        self.trailer = PlistTrailer(0, 0, 0, 0, 0)
        # A set of all the uniques which have been computed.
        self.computedUniques = set()
        # A list of all the uniques which have been written.
        self.writtenReferences = {}
        # A dict of the positions of the written uniques.
        self.referencePositions = {}

    def positionOfObjectReference(self, obj):
        """If the given object has been written already, return its
        position in the offset table. Otherwise, return None."""
        return self.writtenReferences.get(obj)

    def writeRoot(self, root):
        """
        Strategy is:
        - write header
        - wrap root object so everything is hashable
        - compute size of objects which will be written
          - need to do this in order to know how large the object refs
            will be in the list/dict/set reference lists
        - write objects
          - keep objects in writtenReferences
          - keep positions of object references in referencePositions
          - write object references with the length computed previously
        - compute object reference length
        - write object reference positions
        - write trailer
        """
        output = self.header
        wrapped_root = self.wrapRoot(root)
        should_reference_root = True#not isinstance(wrapped_root, HashableWrapper)
        self.computeOffsets(wrapped_root, asReference=should_reference_root, isRoot=True)
        self.trailer = self.trailer._replace(**{'objectRefSize':self.intSize(len(self.computedUniques))})
        (_, output) = self.writeObjectReference(wrapped_root, output)
        output = self.writeObject(wrapped_root, output, setReferencePosition=True)
        # output size at this point is an upper bound on how big the
        # object reference offsets need to be.
        self.trailer = self.trailer._replace(**{
            'offsetSize':self.intSize(len(output)),
            'offsetCount':len(self.computedUniques),
            'offsetTableOffset':len(output),
            'topLevelObjectNumber':0
            })
        output = self.writeOffsetTable(output)
        output += pack('!xxxxxxBBQQQ', *self.trailer)
        self.file.write(output)

    def wrapRoot(self, root):
        """Recursively wrap values in Bool/Float/HashableWrapper so every
        node is hashable and deduplicatable."""
        if isinstance(root, bool):
            if root is True:
                return self.wrappedTrue
            else:
                return self.wrappedFalse
        elif isinstance(root, float):
            return FloatWrapper(root)
        elif isinstance(root, set):
            n = set()
            for value in root:
                n.add(self.wrapRoot(value))
            return HashableWrapper(n)
        elif isinstance(root, dict):
            n = {}
            for key, value in iteritems(root):
                n[self.wrapRoot(key)] = self.wrapRoot(value)
            return HashableWrapper(n)
        elif isinstance(root, list):
            n = []
            for value in root:
                n.append(self.wrapRoot(value))
            return HashableWrapper(n)
        elif isinstance(root, tuple):
            n = tuple([self.wrapRoot(value) for value in root])
            return HashableWrapper(n)
        else:
            return root

    def incrementByteCount(self, field, incr=1):
        # byteCounts is an (immutable) namedtuple, hence _replace.
        self.byteCounts = self.byteCounts._replace(**{field:self.byteCounts.__getattribute__(field) + incr})

    def computeOffsets(self, obj, asReference=False, isRoot=False):
        """Sizing pass: tally the bytes each (unique) object will occupy."""
        def check_key(key):
            if key is None:
                raise InvalidPlistException('Dictionary keys cannot be null in plists.')
            elif isinstance(key, Data):
                raise InvalidPlistException('Data cannot be dictionary keys in plists.')
            elif not isinstance(key, (bytes, unicode)):
                raise InvalidPlistException('Keys must be strings.')
        def proc_size(size):
            # Lengths > 0b1110 don't fit in the marker nibble; they cost an
            # extra encoded integer.
            if size > 0b1110:
                size += self.intSize(size)
            return size
        # If this should be a reference, then we keep a record of it in the
        # uniques table.
        if asReference:
            if obj in self.computedUniques:
                return
            else:
                self.computedUniques.add(obj)
        if obj is None:
            self.incrementByteCount('nullBytes')
        elif isinstance(obj, BoolWrapper):
            self.incrementByteCount('boolBytes')
        elif isinstance(obj, Uid):
            size = self.intSize(obj)
            self.incrementByteCount('uidBytes', incr=1+size)
        elif isinstance(obj, (int, long)):
            size = self.intSize(obj)
            self.incrementByteCount('intBytes', incr=1+size)
        elif isinstance(obj, FloatWrapper):
            size = self.realSize(obj)
            self.incrementByteCount('realBytes', incr=1+size)
        elif isinstance(obj, datetime.datetime):
            self.incrementByteCount('dateBytes', incr=2)
        elif isinstance(obj, Data):
            size = proc_size(len(obj))
            self.incrementByteCount('dataBytes', incr=1+size)
        elif isinstance(obj, (unicode, bytes)):
            size = proc_size(len(obj))
            self.incrementByteCount('stringBytes', incr=1+size)
        elif isinstance(obj, HashableWrapper):
            obj = obj.value
            if isinstance(obj, set):
                size = proc_size(len(obj))
                self.incrementByteCount('setBytes', incr=1+size)
                for value in obj:
                    self.computeOffsets(value, asReference=True)
            elif isinstance(obj, (list, tuple)):
                size = proc_size(len(obj))
                self.incrementByteCount('arrayBytes', incr=1+size)
                for value in obj:
                    asRef = True  # (unused local, kept as-is)
                    self.computeOffsets(value, asReference=True)
            elif isinstance(obj, dict):
                size = proc_size(len(obj))
                self.incrementByteCount('dictBytes', incr=1+size)
                for key, value in iteritems(obj):
                    check_key(key)
                    self.computeOffsets(key, asReference=True)
                    self.computeOffsets(value, asReference=True)
        else:
            raise InvalidPlistException("Unknown object type.")

    def writeObjectReference(self, obj, output):
        """Tries to write an object reference, adding it to the references
        table. Does not write the actual object bytes or set the reference
        position. Returns a tuple of whether the object was a new reference
        (True if it was, False if it already was in the reference table)
        and the new output.
        """
        position = self.positionOfObjectReference(obj)
        if position is None:
            self.writtenReferences[obj] = len(self.writtenReferences)
            output += self.binaryInt(len(self.writtenReferences) - 1, byteSize=self.trailer.objectRefSize)
            return (True, output)
        else:
            output += self.binaryInt(position, byteSize=self.trailer.objectRefSize)
            return (False, output)

    def writeObject(self, obj, output, setReferencePosition=False):
        """Serializes the given object to the output. Returns output.
        If setReferencePosition is True, will set the position the
        object was written.
        """
        def proc_variable_length(format, length):
            result = b''
            if length > 0b1110:
                # Marker nibble saturated: emit 0b1111 then the real length
                # as an encoded integer object.
                result += pack('!B', (format << 4) | 0b1111)
                result = self.writeObject(length, result)
            else:
                result += pack('!B', (format << 4) | length)
            return result
        if isinstance(obj, (str, unicode)) and obj == unicodeEmpty:
            # The Apple Plist decoder can't decode a zero length Unicode string.
            obj = b''
        if setReferencePosition:
            self.referencePositions[obj] = len(output)
        if obj is None:
            output += pack('!B', 0b00000000)
        elif isinstance(obj, BoolWrapper):
            if obj.value is False:
                output += pack('!B', 0b00001000)
            else:
                output += pack('!B', 0b00001001)
        elif isinstance(obj, Uid):
            size = self.intSize(obj)
            output += pack('!B', (0b1000 << 4) | size - 1)
            output += self.binaryInt(obj)
        elif isinstance(obj, (int, long)):
            byteSize = self.intSize(obj)
            # Marker nibble stores log2 of the byte size.
            root = math.log(byteSize, 2)
            output += pack('!B', (0b0001 << 4) | int(root))
            output += self.binaryInt(obj, as_number=True)
        elif isinstance(obj, FloatWrapper):
            # just use doubles
            output += pack('!B', (0b0010 << 4) | 3)
            output += self.binaryReal(obj)
        elif isinstance(obj, datetime.datetime):
            timestamp = (obj - apple_reference_date).total_seconds()
            output += pack('!B', 0b00110011)
            output += pack('!d', float(timestamp))
        elif isinstance(obj, Data):
            output += proc_variable_length(0b0100, len(obj))
            output += obj
        elif isinstance(obj, unicode):
            byteData = obj.encode('utf_16_be')
            # Length is in UTF-16 code units, not bytes.
            output += proc_variable_length(0b0110, len(byteData)//2)
            output += byteData
        elif isinstance(obj, bytes):
            output += proc_variable_length(0b0101, len(obj))
            output += obj
        elif isinstance(obj, HashableWrapper):
            obj = obj.value
            if isinstance(obj, (set, list, tuple)):
                if isinstance(obj, set):
                    output += proc_variable_length(0b1100, len(obj))
                else:
                    output += proc_variable_length(0b1010, len(obj))
                # First the reference list, then the not-yet-written members.
                objectsToWrite = []
                for objRef in obj:
                    (isNew, output) = self.writeObjectReference(objRef, output)
                    if isNew:
                        objectsToWrite.append(objRef)
                for objRef in objectsToWrite:
                    output = self.writeObject(objRef, output, setReferencePosition=True)
            elif isinstance(obj, dict):
                output += proc_variable_length(0b1101, len(obj))
                keys = []
                values = []
                objectsToWrite = []
                for key, value in iteritems(obj):
                    keys.append(key)
                    values.append(value)
                # All key refs first, then all value refs (plist dict layout).
                for key in keys:
                    (isNew, output) = self.writeObjectReference(key, output)
                    if isNew:
                        objectsToWrite.append(key)
                for value in values:
                    (isNew, output) = self.writeObjectReference(value, output)
                    if isNew:
                        objectsToWrite.append(value)
                for objRef in objectsToWrite:
                    output = self.writeObject(objRef, output, setReferencePosition=True)
        return output

    def writeOffsetTable(self, output):
        """Writes all of the object reference offsets."""
        all_positions = []
        writtenReferences = list(self.writtenReferences.items())
        writtenReferences.sort(key=lambda x: x[1])
        for obj,order in writtenReferences:
            # Porting note: Elsewhere we deliberately replace empty unicdoe strings
            # with empty binary strings, but the empty unicode string
            # goes into writtenReferences. This isn't an issue in Py2
            # because u'' and b'' have the same hash; but it is in
            # Py3, where they don't.
            if bytes != str and obj == unicodeEmpty:
                obj = b''
            position = self.referencePositions.get(obj)
            if position is None:
                raise InvalidPlistException("Error while writing offsets table. Object not found. %s" % obj)
            output += self.binaryInt(position, self.trailer.offsetSize)
            all_positions.append(position)
        return output

    def binaryReal(self, obj):
        # just use doubles
        result = pack('>d', obj.value)
        return result

    def binaryInt(self, obj, byteSize=None, as_number=False):
        """Pack an integer big-endian in byteSize bytes (sized from the value
        when byteSize is None)."""
        result = b''
        if byteSize is None:
            byteSize = self.intSize(obj)
        if byteSize == 1:
            result += pack('>B', obj)
        elif byteSize == 2:
            result += pack('>H', obj)
        elif byteSize == 4:
            result += pack('>L', obj)
        elif byteSize == 8:
            if as_number:
                result += pack('>q', obj)
            else:
                result += pack('>Q', obj)
        elif byteSize <= 16:
            # 16-byte case: high quad is zero, low quad holds the value.
            try:
                result = pack('>Q', 0) + pack('>Q', obj)
            except struct_error as e:
                raise InvalidPlistException("Unable to pack integer %d: %s" % (obj, e))
        else:
            raise InvalidPlistException("Core Foundation can't handle integers with size greater than 16 bytes.")
        return result

    def intSize(self, obj):
        """Returns the number of bytes necessary to store the given integer."""
        # SIGNED
        if obj < 0: # Signed integer, always 8 bytes
            return 8
        # UNSIGNED
        elif obj <= 0xFF: # 1 byte
            return 1
        elif obj <= 0xFFFF: # 2 bytes
            return 2
        elif obj <= 0xFFFFFFFF: # 4 bytes
            return 4
        # SIGNED
        # 0x7FFFFFFFFFFFFFFF is the max.
        elif obj <= 0x7FFFFFFFFFFFFFFF: # 8 bytes signed
            return 8
        elif obj <= 0xffffffffffffffff: # 8 bytes unsigned
            return 16
        else:
            raise InvalidPlistException("Core Foundation can't handle integers with size greater than 8 bytes.")

    def realSize(self, obj):
        # Reals are always written as 8-byte doubles.
        return 8
| gpl-2.0 |
mosaic-cloud/mosaic-distribution-dependencies | dependencies/nodejs/0.10.32/deps/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/android.py | 446 | 43487 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This generates makefiles suitable for inclusion into the Android build system
# via an Android.mk file. It is based on make.py, the standard makefile
# generator.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level GypAndroid.mk. This means that all
# variables in .mk-files clobber one another, and furthermore that any
# variables set potentially clash with other Android build system variables.
# Try to avoid setting global variables where possible.
import gyp
import gyp.common
import gyp.generator.make as make # Reuse global functions from make backend.
import os
import re
import subprocess
# Variable expansions gyp substitutes into Android.mk output. The $(...) values
# are deliberately left for Android's make system to expand at build time.
generator_default_variables = {
  'OS': 'android',
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.so',
  'INTERMEDIATE_DIR': '$(gyp_intermediate_dir)',
  'SHARED_INTERMEDIATE_DIR': '$(gyp_shared_intermediate_dir)',
  'PRODUCT_DIR': '$(gyp_shared_intermediate_dir)',
  'SHARED_LIB_DIR': '$(builddir)/lib.$(TOOLSET)',
  'LIB_DIR': '$(obj).$(TOOLSET)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s', # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s', # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(RULE_SOURCES)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(GYP_CONFIGURATION)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Generator-specific gyp specs.
generator_additional_non_configuration_keys = [
    # Boolean to declare that this target does not want its name mangled.
    'android_unmangled_name',
]
generator_additional_path_sections = []
generator_extra_sources_for_rules = []

# Appended once after all per-target sub-makefiles have been included.
SHARED_FOOTER = """\
# "gyp_all_modules" is a concatenation of the "gyp_all_modules" targets from
# all the included sub-makefiles. This is just here to clarify.
gyp_all_modules:
"""

# Written at the top of every generated .mk file.
header = """\
# This file is generated by gyp; do not edit.
"""

# Include paths the Android build system provides implicitly; gyp must not
# re-emit these as LOCAL_C_INCLUDES.
android_standard_include_paths = set([
    # JNI_H_INCLUDE in build/core/binary.mk
    'dalvik/libnativehelper/include/nativehelper',
    # from SRC_HEADERS in build/core/config.mk
    'system/core/include',
    'hardware/libhardware/include',
    'hardware/libhardware_legacy/include',
    'hardware/ril/include',
    'dalvik/libnativehelper/include',
    'frameworks/native/include',
    'frameworks/native/opengl/include',
    'frameworks/base/include',
    'frameworks/base/opengl/include',
    'frameworks/base/native/include',
    'external/skia/include',
    # TARGET_C_INCLUDES in build/core/combo/TARGET_linux-arm.mk
    'bionic/libc/arch-arm/include',
    'bionic/libc/include',
    'bionic/libstdc++/include',
    'bionic/libc/kernel/common',
    'bionic/libc/kernel/arch-arm',
    'bionic/libm/include',
    'bionic/libm/include/arm',
    'bionic/libthread_db/include',
    ])

# Map gyp target types to Android module classes.
MODULE_CLASSES = {
    'static_library': 'STATIC_LIBRARIES',
    'shared_library': 'SHARED_LIBRARIES',
    'executable': 'EXECUTABLES',
}
def IsCPPExtension(ext):
    """Return True if `ext` is registered as a C++ compilable extension."""
    kind = make.COMPILABLE_EXTENSIONS.get(ext)
    return kind == 'cxx'
def Sourceify(path):
    """Convert a path to its source directory form.

    The Android backend does not support options.generator_output, so this is
    an identity transform.
    """
    return path
# Module-level mutable state shared by all AndroidMkWriter instances during
# one generation run.
# Map from qualified target to path to output.
# For Android, the target of these maps is a tuple ('static', 'modulename'),
# ('dynamic', 'modulename'), or ('path', 'some/path') instead of a string,
# since we link by module.
target_outputs = {}
# Map from qualified target to any linkable output. A subset
# of target_outputs. E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
target_link_deps = {}
class AndroidMkWriter(object):
"""AndroidMkWriter packages up the writing of one target-specific Android.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, android_top_dir):
    # Top of the Android source tree; presumably used when relativizing
    # paths in later methods — confirm against the rest of the class.
    self.android_top_dir = android_top_dir
  def Write(self, qualified_target, relative_target, base_path, output_filename,
            spec, configs, part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      relative_target: qualified target name relative to the root
      base_path: path relative to source root we're building in, used to
                 resolve target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')
    self.fp.write(header)
    self.qualified_target = qualified_target
    self.relative_target = relative_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']
    deps, link_deps = self.ComputeDeps(spec)
    # Some of the generation below can add extra output, sources, or
    # link dependencies. All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    self.android_class = MODULE_CLASSES.get(self.type, 'GYP')
    self.android_module = self.ComputeAndroidModule(spec)
    (self.android_stem, self.android_suffix) = self.ComputeOutputParts(spec)
    self.output = self.output_binary = self.ComputeOutput(spec)
    # Standard header.
    self.WriteLn('include $(CLEAR_VARS)\n')
    # Module class and name.
    self.WriteLn('LOCAL_MODULE_CLASS := ' + self.android_class)
    self.WriteLn('LOCAL_MODULE := ' + self.android_module)
    # Only emit LOCAL_MODULE_STEM if it's different to LOCAL_MODULE.
    # The library module classes fail if the stem is set. ComputeOutputParts
    # makes sure that stem == modulename in these cases.
    if self.android_stem != self.android_module:
      self.WriteLn('LOCAL_MODULE_STEM := ' + self.android_stem)
    self.WriteLn('LOCAL_MODULE_SUFFIX := ' + self.android_suffix)
    self.WriteLn('LOCAL_MODULE_TAGS := optional')
    if self.toolset == 'host':
      self.WriteLn('LOCAL_IS_HOST_MODULE := true')
    # Grab output directories; needed for Actions and Rules.
    self.WriteLn('gyp_intermediate_dir := $(call local-intermediates-dir)')
    self.WriteLn('gyp_shared_intermediate_dir := '
                 '$(call intermediates-dir-for,GYP,shared)')
    self.WriteLn()
    # List files this target depends on so that actions/rules/copies/sources
    # can depend on the list.
    # TODO: doesn't pull in things through transitive link deps; needed?
    target_dependencies = [x[1] for x in deps if x[0] == 'path']
    self.WriteLn('# Make sure our deps are built first.')
    self.WriteList(target_dependencies, 'GYP_TARGET_DEPENDENCIES',
                   local_pathify=True)
    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs)
    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs)
    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs)
    # GYP generated outputs.
    self.WriteList(extra_outputs, 'GYP_GENERATED_OUTPUTS', local_pathify=True)
    # Set LOCAL_ADDITIONAL_DEPENDENCIES so that Android's build rules depend
    # on both our dependency targets and our generated files.
    self.WriteLn('# Make sure our deps and generated files are built first.')
    self.WriteLn('LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) '
                 '$(GYP_GENERATED_OUTPUTS)')
    self.WriteLn()
    # Sources.
    if spec.get('sources', []) or extra_sources:
      self.WriteSources(spec, configs, extra_sources)
    self.WriteTarget(spec, configs, deps, link_deps, part_of_all)
    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = ('path', self.output_binary)
    # Update global list of link dependencies.
    if self.type == 'static_library':
      target_link_deps[qualified_target] = ('static', self.android_module)
    elif self.type == 'shared_library':
      target_link_deps[qualified_target] = ('shared', self.android_module)
    self.fp.close()
    return self.android_module
def WriteActions(self, actions, extra_sources, extra_outputs):
  """Write Makefile code for any 'actions' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 actions (used to make other pieces dependent on these
                 actions)
  """
  for action in actions:
    # A Make-safe variable name, unique per target/action pair.
    name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                    action['action_name']))
    self.WriteLn('### Rules for action "%s":' % action['action_name'])
    inputs = action['inputs']
    outputs = action['outputs']

    # Build up a list of outputs.
    # Collect the output dirs we'll need.
    dirs = set()
    for out in outputs:
      if not out.startswith('$'):
        print ('WARNING: Action for target "%s" writes output to local path '
               '"%s".' % (self.target, out))
      # Renamed from 'dir' to avoid shadowing the builtin.
      out_dir = os.path.split(out)[0]
      if out_dir:
        dirs.add(out_dir)
    if int(action.get('process_outputs_as_sources', False)):
      extra_sources += outputs

    # Prepare the actual command.
    command = gyp.common.EncodePOSIXShellList(action['action'])
    if 'message' in action:
      quiet_cmd = 'Gyp action: %s ($@)' % action['message']
    else:
      quiet_cmd = 'Gyp action: %s ($@)' % name
    if len(dirs) > 0:
      # Sort the directories: iterating the set directly would make the
      # generated makefile text differ from run to run (set order is not
      # deterministic), defeating reproducible generation. mkdir -p does
      # not care about the order.
      command = 'mkdir -p %s' % ' '.join(sorted(dirs)) + '; ' + command

    cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
    command = cd_action + command

    # The makefile rules are all relative to the top dir, but the gyp actions
    # are defined relative to their containing dir. This replaces the gyp_*
    # variables for the action rule with an absolute version so that the
    # output goes in the right place.
    # Only write the gyp_* rules for the "primary" output (:1);
    # it's superfluous for the "extra outputs", and this avoids accidentally
    # writing duplicate dummy rules for those outputs.
    main_output = make.QuoteSpaces(self.LocalPathify(outputs[0]))
    self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
    self.WriteLn('%s: gyp_intermediate_dir := '
                 '$(abspath $(gyp_intermediate_dir))' % main_output)
    self.WriteLn('%s: gyp_shared_intermediate_dir := '
                 '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

    # Android's envsetup.sh adds a number of directories to the path including
    # the built host binary directory. This causes actions/rules invoked by
    # gyp to sometimes use these instead of system versions, e.g. bison.
    # The built host binaries may not be suitable, and can cause errors.
    # So, we remove them from the PATH using the ANDROID_BUILD_PATHS variable
    # set by envsetup.
    self.WriteLn('%s: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))'
                 % main_output)

    # Spaces cannot be quoted reliably in make prerequisite lists; fail
    # loudly rather than emit a broken rule.
    for input_path in inputs:
      assert ' ' not in input_path, (
          "Spaces in action input filenames not supported (%s)" % input_path)
    for output in outputs:
      assert ' ' not in output, (
          "Spaces in action output filenames not supported (%s)" % output)

    self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                 (main_output, ' '.join(map(self.LocalPathify, inputs))))
    self.WriteLn('\t@echo "%s"' % quiet_cmd)
    self.WriteLn('\t$(hide)%s\n' % command)
    for output in outputs[1:]:
      # Make each output depend on the main output, with an empty command
      # to force make to notice that the mtime has changed.
      self.WriteLn('%s: %s ;' % (self.LocalPathify(output), main_output))

    extra_outputs += outputs
    self.WriteLn()
  self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs):
  """Write Makefile code for any 'rules' from the gyp input.

  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 rules (used to make other pieces dependent on these rules)
  """
  if len(rules) == 0:
    return
  # Phony target used to force every rule's primary output to be built.
  rule_trigger = '%s_rule_trigger' % self.android_module

  did_write_rule = False
  for rule in rules:
    # A rule with no sources emits nothing; skip it entirely.
    if len(rule.get('rule_sources', [])) == 0:
      continue
    did_write_rule = True
    name = make.StringToMakefileVariable('%s_%s' % (self.relative_target,
                                                    rule['rule_name']))
    self.WriteLn('\n### Generated for rule "%s":' % name)
    self.WriteLn('# "%s":' % rule)

    inputs = rule.get('inputs')
    # One make rule is emitted per source file matched by this gyp rule.
    for rule_source in rule.get('rule_sources', []):
      (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
      (rule_source_root, rule_source_ext) = \
          os.path.splitext(rule_source_basename)

      outputs = [self.ExpandInputRoot(out, rule_source_root,
                                      rule_source_dirname)
                 for out in rule['outputs']]

      dirs = set()
      for out in outputs:
        if not out.startswith('$'):
          print ('WARNING: Rule for target %s writes output to local path %s'
                 % (self.target, out))
        dir = os.path.dirname(out)
        if dir:
          dirs.add(dir)
      extra_outputs += outputs
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources.extend(outputs)

      # Substitute per-source placeholders into each word of the action.
      components = []
      for component in rule['action']:
        component = self.ExpandInputRoot(component, rule_source_root,
                                         rule_source_dirname)
        if '$(RULE_SOURCES)' in component:
          component = component.replace('$(RULE_SOURCES)',
                                        rule_source)
        components.append(component)

      command = gyp.common.EncodePOSIXShellList(components)
      cd_action = 'cd $(gyp_local_path)/%s; ' % self.path
      command = cd_action + command
      # NOTE(review): joining a set here makes the mkdir argument order
      # nondeterministic across runs — harmless to mkdir -p, but the
      # generated makefile text can differ between generations; confirm
      # whether deterministic output matters here.
      if dirs:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command

      # We set up a rule to build the first output, and then set up
      # a rule for each additional output to depend on the first.
      outputs = map(self.LocalPathify, outputs)
      main_output = outputs[0]
      self.WriteLn('%s: gyp_local_path := $(LOCAL_PATH)' % main_output)
      self.WriteLn('%s: gyp_intermediate_dir := '
                   '$(abspath $(gyp_intermediate_dir))' % main_output)
      self.WriteLn('%s: gyp_shared_intermediate_dir := '
                   '$(abspath $(gyp_shared_intermediate_dir))' % main_output)

      # See explanation in WriteActions.
      self.WriteLn('%s: export PATH := '
                   '$(subst $(ANDROID_BUILD_PATHS),,$(PATH))' % main_output)

      # The primary output depends on the matched source plus any
      # rule-level 'inputs'.
      main_output_deps = self.LocalPathify(rule_source)
      if inputs:
        main_output_deps += ' '
        main_output_deps += ' '.join([self.LocalPathify(f) for f in inputs])

      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES)' %
                   (main_output, main_output_deps))
      self.WriteLn('\t%s\n' % command)
      for output in outputs[1:]:
        # Make each output depend on the main output, with an empty command
        # to force make to notice that the mtime has changed.
        self.WriteLn('%s: %s ;' % (output, main_output))
      self.WriteLn('.PHONY: %s' % (rule_trigger))
      self.WriteLn('%s: %s' % (rule_trigger, main_output))
      self.WriteLn('')
  if did_write_rule:
    extra_sources.append(rule_trigger)  # Force all rules to run.
    self.WriteLn('### Finished generating for all rules')
    self.WriteLn('')
def WriteCopies(self, copies, extra_outputs):
  """Write Makefile code for any 'copies' from the gyp input.

  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  """
  self.WriteLn('### Generated for copy rule.')

  # A single make variable collects every copied file for this target.
  variable = make.StringToMakefileVariable(self.relative_target + '_copies')
  outputs = []
  for copy in copies:
    for path in copy['files']:
      # The Android build system does not allow generation of files into the
      # source tree. The destination should start with a variable, which will
      # typically be $(gyp_intermediate_dir) or
      # $(gyp_shared_intermediate_dir). Note that we can't use an assertion
      # because some of the gyp tests depend on this.
      if not copy['destination'].startswith('$'):
        print ('WARNING: Copy rule for target %s writes output to '
               'local path %s' % (self.target, copy['destination']))

      # LocalPathify() calls normpath, stripping trailing slashes.
      path = Sourceify(self.LocalPathify(path))
      filename = os.path.split(path)[1]
      output = Sourceify(self.LocalPathify(os.path.join(copy['destination'],
                                                        filename)))

      # Emit one copy rule per file; $(ACP) is the Android copy tool,
      # ordered-only so the rule doesn't rebuild when ACP changes.
      self.WriteLn('%s: %s $(GYP_TARGET_DEPENDENCIES) | $(ACP)' %
                   (output, path))
      self.WriteLn('\t@echo Copying: $@')
      self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
      self.WriteLn('\t$(hide) $(ACP) -rpf $< $@')
      self.WriteLn()
      outputs.append(output)
  self.WriteLn('%s = %s' % (variable,
                            ' '.join(map(make.QuoteSpaces, outputs))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteSourceFlags(self, spec, configs):
  """Write out the flags and include paths used to compile source files for
  the current target.

  Args:
    spec, configs: input from gyp.
  """
  # Per-configuration variables are emitted first; the generic LOCAL_*
  # variables below select among them via $(GYP_CONFIGURATION).
  for configname, config in sorted(configs.iteritems()):
    extracted_includes = []

    self.WriteLn('\n# Flags passed to both C and C++ files.')
    # -I flags are pulled out of the cflags so they can be merged with
    # include_dirs and normalized below.
    cflags, includes_from_cflags = self.ExtractIncludesFromCFlags(
        config.get('cflags', []) + config.get('cflags_c', []))
    extracted_includes.extend(includes_from_cflags)
    self.WriteList(cflags, 'MY_CFLAGS_%s' % configname)

    self.WriteList(config.get('defines'), 'MY_DEFS_%s' % configname,
                   prefix='-D', quoter=make.EscapeCppDefine)

    self.WriteLn('\n# Include paths placed before CFLAGS/CPPFLAGS')
    includes = list(config.get('include_dirs', []))
    includes.extend(extracted_includes)
    includes = map(Sourceify, map(self.LocalPathify, includes))
    includes = self.NormalizeIncludePaths(includes)
    self.WriteList(includes, 'LOCAL_C_INCLUDES_%s' % configname)

    self.WriteLn('\n# Flags passed to only C++ (and not C) files.')
    self.WriteList(config.get('cflags_cc'), 'LOCAL_CPPFLAGS_%s' % configname)

  self.WriteLn('\nLOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) '
               '$(MY_DEFS_$(GYP_CONFIGURATION))')
  # Undefine ANDROID for host modules
  # TODO: the source code should not use macro ANDROID to tell if it's host
  # or target module.
  if self.toolset == 'host':
    self.WriteLn('# Undefine ANDROID for host modules')
    self.WriteLn('LOCAL_CFLAGS += -UANDROID')
  self.WriteLn('LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) '
               '$(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))')
  self.WriteLn('LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))')
def WriteSources(self, spec, configs, extra_sources):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.
  We need to handle shared_intermediate directory source files as
  a special case by copying them to the intermediate directory and
  treating them as a genereated sources. Otherwise the Android build
  rules won't pick them up.

  Args:
    spec, configs: input from gyp.
    extra_sources: Sources generated from Actions or Rules.
  """
  sources = filter(make.Compilable, spec.get('sources', []))
  # Non-compilable generated files (headers etc.) are kept aside and added
  # back at the end so make still tracks them as dependencies.
  generated_not_sources = [x for x in extra_sources if not make.Compilable(x)]
  extra_sources = filter(make.Compilable, extra_sources)

  # Determine and output the C++ extension used by these sources.
  # We simply find the first C++ file and use that extension.
  all_sources = sources + extra_sources
  local_cpp_extension = '.cpp'
  for source in all_sources:
    (root, ext) = os.path.splitext(source)
    if IsCPPExtension(ext):
      local_cpp_extension = ext
      break
  if local_cpp_extension != '.cpp':
    self.WriteLn('LOCAL_CPP_EXTENSION := %s' % local_cpp_extension)

  # We need to move any non-generated sources that are coming from the
  # shared intermediate directory out of LOCAL_SRC_FILES and put them
  # into LOCAL_GENERATED_SOURCES. We also need to move over any C++ files
  # that don't match our local_cpp_extension, since Android will only
  # generate Makefile rules for a single LOCAL_CPP_EXTENSION.
  local_files = []
  for source in sources:
    (root, ext) = os.path.splitext(source)
    if '$(gyp_shared_intermediate_dir)' in source:
      extra_sources.append(source)
    elif '$(gyp_intermediate_dir)' in source:
      extra_sources.append(source)
    elif IsCPPExtension(ext) and ext != local_cpp_extension:
      extra_sources.append(source)
    else:
      local_files.append(os.path.normpath(os.path.join(self.path, source)))

  # For any generated source, if it is coming from the shared intermediate
  # directory then we add a Make rule to copy them to the local intermediate
  # directory first. This is because the Android LOCAL_GENERATED_SOURCES
  # must be in the local module intermediate directory for the compile rules
  # to work properly. If the file has the wrong C++ extension, then we add
  # a rule to copy that to intermediates and use the new version.
  final_generated_sources = []
  # If a source file gets copied, we still need to add the orginal source
  # directory as header search path, for GCC searches headers in the
  # directory that contains the source file by default.
  origin_src_dirs = []
  for source in extra_sources:
    local_file = source
    if not '$(gyp_intermediate_dir)/' in local_file:
      basename = os.path.basename(local_file)
      local_file = '$(gyp_intermediate_dir)/' + basename
    (root, ext) = os.path.splitext(local_file)
    if IsCPPExtension(ext) and ext != local_cpp_extension:
      local_file = root + local_cpp_extension
    if local_file != source:
      # A copy rule is only needed when the file was renamed/relocated.
      self.WriteLn('%s: %s' % (local_file, self.LocalPathify(source)))
      self.WriteLn('\tmkdir -p $(@D); cp $< $@')
      origin_src_dirs.append(os.path.dirname(source))
    final_generated_sources.append(local_file)

  # We add back in all of the non-compilable stuff to make sure that the
  # make rules have dependencies on them.
  final_generated_sources.extend(generated_not_sources)
  self.WriteList(final_generated_sources, 'LOCAL_GENERATED_SOURCES')

  origin_src_dirs = gyp.common.uniquer(origin_src_dirs)
  origin_src_dirs = map(Sourceify, map(self.LocalPathify, origin_src_dirs))
  self.WriteList(origin_src_dirs, 'GYP_COPIED_SOURCE_ORIGIN_DIRS')

  self.WriteList(local_files, 'LOCAL_SRC_FILES')

  # Write out the flags used to compile the source; this must be done last
  # so that GYP_COPIED_SOURCE_ORIGIN_DIRS can be used as an include path.
  self.WriteSourceFlags(spec, configs)
def ComputeAndroidModule(self, spec):
  """Return the Android module name used for a gyp spec.

  We use the complete qualified target name to avoid collisions between
  duplicate targets in different directories. We also add a suffix to
  distinguish gyp-generated module names.
  """
  # An explicitly unmangled name is used verbatim; shared libraries must
  # then already carry the conventional 'lib' prefix.
  if int(spec.get('android_unmangled_name', 0)):
    assert self.type != 'shared_library' or self.target.startswith('lib')
    return self.target

  # Shared libraries get a 'lib_' prefix so -l flags resolve; host modules
  # get a distinct suffix from target modules.
  prefix = 'lib_' if self.type == 'shared_library' else ''
  suffix = '_host_gyp' if spec['toolset'] == 'host' else '_gyp'

  if self.path:
    raw_name = '%s%s_%s%s' % (prefix, self.path, self.target, suffix)
  else:
    raw_name = '%s%s%s' % (prefix, self.target, suffix)

  return make.StringToMakefileVariable(raw_name)
def ComputeOutputParts(self, spec):
  """Return the 'output basename' of a gyp spec, split into filename + ext.

  Android libraries must be named the same thing as their module name,
  otherwise the linker can't find them, so product_name and so on must be
  ignored if we are building a library, and the "lib" prepending is
  not done for Android.
  """
  assert self.type != 'loadable_module'  # TODO: not supported?

  basename = spec['target_name']
  prefix = ''
  ext = ''
  if self.type == 'static_library':
    basename = self.ComputeAndroidModule(spec)
    ext = '.a'
  elif self.type == 'shared_library':
    basename = self.ComputeAndroidModule(spec)
    ext = '.so'
  elif self.type == 'none':
    ext = '.stamp'
  elif self.type != 'executable':
    print ("ERROR: What output file should be generated?",
           "type", self.type, "target", basename)

  # Libraries ignore product_* customization (see docstring); everything
  # else honors it.
  if self.type not in ('static_library', 'shared_library'):
    prefix = spec.get('product_prefix', prefix)
    basename = spec.get('product_name', basename)
    product_ext = spec.get('product_extension')
    if product_ext:
      ext = '.' + product_ext

  return (prefix + basename, ext)
def ComputeOutputBasename(self, spec):
  """Return the 'output basename' of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
        'libfoobar.so'
  """
  stem, ext = self.ComputeOutputParts(spec)
  return stem + ext
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
        '$(obj)/baz/libfoobar.so'
  """
  is_host = (self.toolset == 'host')
  if self.type == 'executable' and is_host:
    # We install host executables into shared_intermediate_dir so they can
    # be run by gyp rules that refer to PRODUCT_DIR.
    out_dir = '$(gyp_shared_intermediate_dir)'
  elif self.type == 'shared_library':
    out_dir = ('$(HOST_OUT_INTERMEDIATE_LIBRARIES)' if is_host
               else '$(TARGET_OUT_INTERMEDIATE_LIBRARIES)')
  elif is_host:
    # Other host targets get built into their intermediate dir (the extra
    # 'true' argument selects the host variant of the directory).
    out_dir = '$(call intermediates-dir-for,%s,%s,true)' % (
        self.android_class, self.android_module)
  else:
    # Other device targets just get built into their intermediate dir.
    out_dir = '$(call intermediates-dir-for,%s,%s)' % (
        self.android_class, self.android_module)

  assert spec.get('product_dir') is None  # TODO: not supported?
  return os.path.join(out_dir, self.ComputeOutputBasename(spec))
def NormalizeIncludePaths(self, include_paths):
  """ Normalize include_paths.
  Convert absolute paths to relative to the Android top directory;
  filter out include paths that are already brought in by the Android build
  system.

  Args:
    include_paths: A list of unprocessed include paths.
  Returns:
    A list of normalized include paths.
  """
  result = []
  for raw_path in include_paths:
    # Rebase absolute paths onto the Android source tree root.
    path = (gyp.common.RelativePath(raw_path, self.android_top_dir)
            if raw_path[0] == '/' else raw_path)
    # Drop paths the Android build system already searches by default.
    if path not in android_standard_include_paths:
      result.append(path)
  return result
def ExtractIncludesFromCFlags(self, cflags):
  """Extract includes "-I..." out from cflags

  Args:
    cflags: A list of compiler flags, which may be mixed with "-I.."
  Returns:
    A tuple of lists: (clean_clfags, include_paths). "-I.." is trimmed.
  """
  # Split the flags into the two groups in one pass each, preserving order.
  include_paths = [flag[2:] for flag in cflags if flag.startswith('-I')]
  clean_cflags = [flag for flag in cflags if not flag.startswith('-I')]
  return (clean_cflags, include_paths)
def ComputeAndroidLibraryModuleNames(self, libraries):
  """Compute the Android module names from libraries, ie spec.get('libraries')

  Args:
    libraries: the value of spec.get('libraries')
  Returns:
    A tuple (static_lib_modules, dynamic_lib_modules)
  """
  static_lib_modules = []
  dynamic_lib_modules = []
  for library_words in libraries:
    # Libs can have multiple words.
    for lib in library_words.split():
      # Filter the system libraries, which are added by default by the
      # Android build system.
      if lib in ('-lc', '-lstdc++', '-lm') or lib.endswith('libgcc.a'):
        continue
      # Explicit archive path -> static module named after the file stem.
      static_match = re.search(r'([^/]+)\.a$', lib)
      if static_match:
        static_lib_modules.append(static_match.group(1))
        continue
      # Explicit shared-object path -> dynamic module.
      shared_match = re.search(r'([^/]+)\.so$', lib)
      if shared_match:
        dynamic_lib_modules.append(shared_match.group(1))
        continue
      # "-lstlport" -> libstlport; a "_static" suffix marks a static module.
      if lib.startswith('-l'):
        module = 'lib' + lib[2:]
        if lib.endswith('_static'):
          static_lib_modules.append(module)
        else:
          dynamic_lib_modules.append(module)
  return (static_lib_modules, dynamic_lib_modules)
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  deps = []
  link_deps = []
  for dep in spec.get('dependencies', []):
    # Build-order dependencies come from the global registry of target
    # outputs; entries with a falsy output are skipped.
    if target_outputs[dep]:
      deps.append(target_outputs[dep])
    # Link dependencies exist only for library-producing targets.
    if dep in target_link_deps:
      link_deps.append(target_link_deps[dep])
  # Anything we link against must also be built first.
  deps.extend(link_deps)
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteTargetFlags(self, spec, configs, link_deps):
  """Write Makefile code to specify the link flags and library dependencies.

  spec, configs: input from gyp.
  link_deps: link dependency list; see ComputeDeps()
  """
  # Per-configuration LDFLAGS variables; the generic LOCAL_LDFLAGS picks
  # one via $(GYP_CONFIGURATION).
  for configname, config in sorted(configs.iteritems()):
    ldflags = list(config.get('ldflags', []))
    self.WriteLn('')
    self.WriteList(ldflags, 'LOCAL_LDFLAGS_%s' % configname)
  self.WriteLn('\nLOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))')

  # Libraries (i.e. -lfoo)
  libraries = gyp.common.uniquer(spec.get('libraries', []))
  static_libs, dynamic_libs = self.ComputeAndroidLibraryModuleNames(
      libraries)

  # Link dependencies (i.e. libfoo.a, libfoo.so)
  static_link_deps = [x[1] for x in link_deps if x[0] == 'static']
  shared_link_deps = [x[1] for x in link_deps if x[0] == 'shared']

  self.WriteLn('')
  self.WriteList(static_libs + static_link_deps,
                 'LOCAL_STATIC_LIBRARIES')
  self.WriteLn('# Enable grouping to fix circular references')
  self.WriteLn('LOCAL_GROUP_STATIC_LIBRARIES := true')
  self.WriteLn('')
  self.WriteList(dynamic_libs + shared_link_deps,
                 'LOCAL_SHARED_LIBRARIES')
def WriteTarget(self, spec, configs, deps, link_deps, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if self.type != 'none':
    self.WriteTargetFlags(spec, configs, link_deps)

  # Add to the set of targets which represent the gyp 'all' target. We use the
  # name 'gyp_all_modules' as the Android build system doesn't allow the use
  # of the Make target 'all' and because 'all_modules' is the equivalent of
  # the Make target 'all' on Android.
  if part_of_all:
    self.WriteLn('# Add target alias to "gyp_all_modules" target.')
    self.WriteLn('.PHONY: gyp_all_modules')
    self.WriteLn('gyp_all_modules: %s' % self.android_module)
    self.WriteLn('')

  # Add an alias from the gyp target name to the Android module name. This
  # simplifies manual builds of the target, and is required by the test
  # framework.
  if self.target != self.android_module:
    self.WriteLn('# Alias gyp target name.')
    self.WriteLn('.PHONY: %s' % self.target)
    self.WriteLn('%s: %s' % (self.target, self.android_module))
    self.WriteLn('')

  # Add the command to trigger build of the target type depending
  # on the toolset. Ex: BUILD_STATIC_LIBRARY vs. BUILD_HOST_STATIC_LIBRARY
  # NOTE: This has to come last!
  modifier = ''
  if self.toolset == 'host':
    modifier = 'HOST_'
  if self.type == 'static_library':
    self.WriteLn('include $(BUILD_%sSTATIC_LIBRARY)' % modifier)
  elif self.type == 'shared_library':
    self.WriteLn('LOCAL_PRELINK_MODULE := false')
    self.WriteLn('include $(BUILD_%sSHARED_LIBRARY)' % modifier)
  elif self.type == 'executable':
    if self.toolset == 'host':
      self.WriteLn('LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)')
    else:
      # Don't install target executables for now, as it results in them being
      # included in ROM. This can be revisited if there's a reason to install
      # them later.
      self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn('include $(BUILD_%sEXECUTABLE)' % modifier)
  else:
    # 'none' targets: emit a stamp module by hand, since the Android build
    # system has no rule class for them.
    self.WriteLn('LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp')
    self.WriteLn('LOCAL_UNINSTALLABLE_MODULE := true')
    self.WriteLn()
    self.WriteLn('include $(BUILD_SYSTEM)/base_rules.mk')
    self.WriteLn()
    self.WriteLn('$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)')
    self.WriteLn('\t$(hide) echo "Gyp timestamp: $@"')
    self.WriteLn('\t$(hide) mkdir -p $(dir $@)')
    self.WriteLn('\t$(hide) touch $@')
def WriteList(self, value_list, variable=None, prefix='',
              quoter=make.QuoteIfNecessary, local_pathify=False):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.
  """
  if value_list:
    quoted = [quoter(prefix + item) for item in value_list]
    if local_pathify:
      quoted = [self.LocalPathify(item) for item in quoted]
    # One value per continuation line, tab-indented.
    rendered = ' \\\n\t' + ' \\\n\t'.join(quoted)
  else:
    rendered = ''
  self.fp.write('%s :=%s\n\n' % (variable, rendered))
def WriteLn(self, text=''):
  """Write |text| to the output makefile, followed by a newline."""
  self.fp.write(text + '\n')
def LocalPathify(self, path):
  """Convert a subdirectory-relative path into a normalized path which starts
  with the make variable $(LOCAL_PATH) (i.e. the top of the project tree).
  Absolute paths, or paths that contain variables, are just normalized."""
  if '$(' in path or os.path.isabs(path):
    # path is not a file in the project tree in this case, but calling
    # normpath is still important for trimming trailing slashes.
    return os.path.normpath(path)

  result = os.path.normpath(os.path.join('$(LOCAL_PATH)', self.path, path))
  # Check that normalizing the path didn't ../ itself out of $(LOCAL_PATH)
  # - i.e. that the resulting path is still inside the project tree. The
  # path may legitimately have ended up containing just $(LOCAL_PATH),
  # though, so we don't look for a slash.
  assert result.startswith('$(LOCAL_PATH)'), (
      'Path %s attempts to escape from gyp path %s !)' % (path, self.path))
  return result
def ExpandInputRoot(self, template, expansion, dirname):
  """Expand the %(INPUT_ROOT)s and %(INPUT_DIRNAME)s placeholders in
  |template|, if present; templates without either placeholder are
  returned untouched (so stray '%' characters in them stay safe)."""
  has_placeholder = ('%(INPUT_ROOT)s' in template or
                     '%(INPUT_DIRNAME)s' in template)
  if not has_placeholder:
    return template
  return template % {
      'INPUT_ROOT': expansion,
      'INPUT_DIRNAME': dirname,
  }
def PerformBuild(data, configurations, params):
# The android backend only supports the default configuration.
options = params['options']
makefile = os.path.abspath(os.path.join(options.toplevel_dir,
'GypAndroid.mk'))
env = dict(os.environ)
env['ONE_SHOT_MAKEFILE'] = makefile
arguments = ['make', '-C', os.environ['ANDROID_BUILD_TOP'], 'gyp_all_modules']
print 'Building: %s' % arguments
subprocess.check_call(arguments, env=env)
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate GypAndroid.mk plus one per-target .mk file that it includes.

  Entry point called by gyp for the 'android' generator.
  """
  options = params['options']
  generator_flags = params.get('generator_flags', {})
  # NOTE(review): builddir_name appears unused in this function — confirm.
  builddir_name = generator_flags.get('output_dir', 'out')
  limit_to_target_all = generator_flags.get('limit_to_target_all', False)
  android_top_dir = os.environ.get('ANDROID_BUILD_TOP')
  assert android_top_dir, '$ANDROID_BUILD_TOP not set; you need to run lunch.'

  def CalculateMakefilePath(build_file, base_name):
    """Determine where to write a Makefile for a given gyp file."""
    # Paths in gyp files are relative to the .gyp file, but we want
    # paths relative to the source root for the master makefile. Grab
    # the path of the .gyp file as the base to relativize against.
    # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.depth)
    # We write the file in the base_path directory.
    output_file = os.path.join(options.depth, base_path, base_name)
    assert not options.generator_output, (
        'The Android backend does not support options.generator_output.')
    base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                        options.toplevel_dir)
    return base_path, output_file

  # TODO: search for the first non-'Default' target. This can go
  # away when we add verification that all targets have the
  # necessary configurations.
  default_configuration = None
  # NOTE(review): toolsets is computed but never read here — confirm.
  toolsets = set([target_dicts[target]['toolset'] for target in target_list])
  for target in target_list:
    spec = target_dicts[target]
    if spec['default_configuration'] != 'Default':
      default_configuration = spec['default_configuration']
      break
  if not default_configuration:
    default_configuration = 'Default'

  # NOTE(review): srcdir appears unused in this function — confirm.
  srcdir = '.'
  makefile_name = 'GypAndroid' + options.suffix + '.mk'
  makefile_path = os.path.join(options.toplevel_dir, makefile_name)
  assert not options.generator_output, (
      'The Android backend does not support options.generator_output.')
  gyp.common.EnsureDirExists(makefile_path)
  root_makefile = open(makefile_path, 'w')

  root_makefile.write(header)

  # We set LOCAL_PATH just once, here, to the top of the project tree. This
  # allows all the other paths we use to be relative to the Android.mk file,
  # as the Android build system expects.
  root_makefile.write('\nLOCAL_PATH := $(call my-dir)\n')

  # Find the list of targets that derive from the gyp file(s) being built.
  needed_targets = set()
  for build_file in params['build_files']:
    for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
      needed_targets.add(target)

  build_files = set()
  include_list = set()
  android_modules = {}
  for qualified_target in target_list:
    build_file, target, toolset = gyp.common.ParseQualifiedTarget(
        qualified_target)
    relative_build_file = gyp.common.RelativePath(build_file,
                                                  options.toplevel_dir)
    build_files.add(relative_build_file)
    included_files = data[build_file]['included_files']
    for included_file in included_files:
      # The included_files entries are relative to the dir of the build file
      # that included them, so we have to undo that and then make them relative
      # to the root dir.
      relative_include_file = gyp.common.RelativePath(
          gyp.common.UnrelativePath(included_file, build_file),
          options.toplevel_dir)
      abs_include_file = os.path.abspath(relative_include_file)
      # If the include file is from the ~/.gyp dir, we should use absolute path
      # so that relocating the src dir doesn't break the path.
      if (params['home_dot_gyp'] and
          abs_include_file.startswith(params['home_dot_gyp'])):
        build_files.add(abs_include_file)
      else:
        build_files.add(relative_include_file)

    base_path, output_file = CalculateMakefilePath(build_file,
        target + '.' + toolset + options.suffix + '.mk')

    spec = target_dicts[qualified_target]
    configs = spec['configurations']

    part_of_all = (qualified_target in needed_targets and
                   not int(spec.get('suppress_wildcard', False)))
    if limit_to_target_all and not part_of_all:
      continue

    relative_target = gyp.common.QualifiedTarget(relative_build_file, target,
                                                 toolset)
    # One AndroidMkWriter instance per target; it writes the per-target .mk.
    writer = AndroidMkWriter(android_top_dir)
    android_module = writer.Write(qualified_target, relative_target, base_path,
                                  output_file, spec, configs,
                                  part_of_all=part_of_all)
    if android_module in android_modules:
      print ('ERROR: Android module names must be unique. The following '
             'targets both generate Android module name %s.\n %s\n %s' %
             (android_module, android_modules[android_module],
              qualified_target))
      return
    android_modules[android_module] = qualified_target

    # Our root_makefile lives at the source root. Compute the relative path
    # from there to the output_file for including.
    mkfile_rel_path = gyp.common.RelativePath(output_file,
                                              os.path.dirname(makefile_path))
    include_list.add(mkfile_rel_path)

  root_makefile.write('GYP_CONFIGURATION ?= %s\n' % default_configuration)

  # Write out the sorted list of includes.
  root_makefile.write('\n')
  for include_file in sorted(include_list):
    root_makefile.write('include $(LOCAL_PATH)/' + include_file + '\n')
  root_makefile.write('\n')

  root_makefile.write(SHARED_FOOTER)

  root_makefile.close()
| apache-2.0 |
savoirfairelinux/OpenUpgrade | addons/account/account.py | 13 | 189623 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import itemgetter
import time
import openerp
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv, expression
from openerp.tools.translate import _
from openerp.tools.float_utils import float_round as round
import openerp.addons.decimal_precision as dp
_logger = logging.getLogger(__name__)
def check_cycle(self, cr, uid, ids, context=None):
    """Climb the ``parent_id`` chains of ``self._table`` for at most 100
    levels or until no more parents are found.

    Returns True if every chain terminates (no cycle), False if 100
    iterations pass without exhausting all chains.
    """
    remaining = 100
    current_ids = ids
    while current_ids:
        # self._table is a model attribute, not user input, so string
        # concatenation of the table name is acceptable here; the id
        # tuple is still passed as a bound parameter.
        cr.execute('SELECT DISTINCT parent_id '
                   'FROM ' + self._table + ' '
                   'WHERE id IN %s '
                   'AND parent_id IS NOT NULL', (tuple(current_ids),))
        current_ids = [row[0] for row in cr.fetchall()]
        if not remaining:
            return False
        remaining -= 1
    return True
class account_payment_term(osv.osv):
    # Payment term: a named set of instalment rules (line_ids) used to split
    # an invoice total into several due dates/amounts.
    _name = "account.payment.term"
    _description = "Payment Term"
    _columns = {
        'name': fields.char('Payment Term', size=64, translate=True, required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the payment term without removing it."),
        'note': fields.text('Description', translate=True),
        'line_ids': fields.one2many('account.payment.term.line', 'payment_id', 'Terms'),
    }
    _defaults = {
        'active': 1,
    }
    _order = "name"
    def compute(self, cr, uid, id, value, date_ref=False, context=None):
        """Split ``value`` into instalments according to this payment term.

        :param id: id of the payment term to apply (a single id, not a list)
        :param value: total amount to distribute over the term lines
        :param date_ref: reference date as '%Y-%m-%d'; defaults to today
        :return: list of ``(due_date_str, amount)`` tuples, one per term line
                 producing a non-zero amount, plus a final instalment due
                 today for any rounding residue
        """
        if not date_ref:
            date_ref = datetime.now().strftime('%Y-%m-%d')
        pt = self.browse(cr, uid, id, context=context)
        amount = value
        result = []
        obj_precision = self.pool.get('decimal.precision')
        prec = obj_precision.precision_get(cr, uid, 'Account')
        for line in pt.line_ids:
            if line.value == 'fixed':
                amt = round(line.value_amount, prec)
            elif line.value == 'procent':
                # value_amount is a ratio in [0, 1] for percentage lines
                amt = round(value * line.value_amount, prec)
            elif line.value == 'balance':
                # whatever the previous lines have not allocated yet
                amt = round(amount, prec)
            if amt:
                next_date = (datetime.strptime(date_ref, '%Y-%m-%d') + relativedelta(days=line.days))
                if line.days2 < 0:
                    # negative day-of-month: count back from the end of next month
                    next_first_date = next_date + relativedelta(day=1,months=1) #Getting 1st of next month
                    next_date = next_first_date + relativedelta(days=line.days2)
                if line.days2 > 0:
                    # positive day-of-month: that day of the following month
                    next_date += relativedelta(day=line.days2, months=1)
                result.append( (next_date.strftime('%Y-%m-%d'), amt) )
                amount -= amt
        # re-sum what was scheduled and push any rounding gap into a final
        # instalment dated today, so the instalments always add up to ``value``
        amount = reduce(lambda x,y: x+y[1], result, 0.0)
        dist = round(value-amount, prec)
        if dist:
            result.append( (time.strftime('%Y-%m-%d'), dist) )
        return result
class account_payment_term_line(osv.osv):
    # One instalment rule of a payment term: how much to pay (fixed amount,
    # percentage of the total, or remaining balance) and when (days offset
    # plus an optional day-of-month adjustment).
    _name = "account.payment.term.line"
    _description = "Payment Term Line"
    _columns = {
        'value': fields.selection([('procent', 'Percent'),
                                   ('balance', 'Balance'),
                                   ('fixed', 'Fixed Amount')], 'Computation',
                                   required=True, help="""Select here the kind of valuation related to this payment term line. Note that you should have your last line with the type 'Balance' to ensure that the whole amount will be treated."""),
        'value_amount': fields.float('Amount To Pay', digits_compute=dp.get_precision('Payment Term'), help="For percent enter a ratio between 0-1."),
        'days': fields.integer('Number of Days', required=True, help="Number of days to add before computation of the day of month." \
            "If Date=15/01, Number of Days=22, Day of Month=-1, then the due date is 28/02."),
        'days2': fields.integer('Day of the Month', required=True, help="Day of the month, set -1 for the last day of the current month. If it's positive, it gives the day of the next month. Set 0 for net days (otherwise it's based on the beginning of the month)."),
        'payment_id': fields.many2one('account.payment.term', 'Payment Term', required=True, select=True, ondelete='cascade'),
    }
    _defaults = {
        'value': 'balance',
        'days': 30,
        'days2': 0,
    }
    _order = "value desc,days"
    def _check_percent(self, cr, uid, ids, context=None):
        """Constraint: percentage lines must have ``value_amount`` in [0, 1].

        Validates every record in ``ids``. The previous implementation only
        inspected ``ids[0]``, so invalid sibling records written in the same
        batch slipped through the constraint unchecked.

        :return: False as soon as one 'procent' line is out of range,
                 True otherwise
        """
        for line in self.browse(cr, uid, ids, context=context):
            if line.value == 'procent' and (line.value_amount < 0.0 or line.value_amount > 1.0):
                return False
        return True
    _constraints = [
        (_check_percent, 'Percentages for Payment Term Line must be between 0 and 1, Example: 0.02 for 2%.', ['value_amount']),
    ]
class account_account_type(osv.osv):
    # Account type: informational classification of accounts that also drives
    # the year-end closing method and the P&L / balance-sheet reporting slot.
    _name = "account.account.type"
    _description = "Account Type"
    def _get_financial_report_ref(self, cr, uid, context=None):
        """Return a dict mapping category keys ('asset', 'liability',
        'income', 'expense') to the corresponding account.financial.report
        browse record, resolved through the 'account' module xml-ids.
        Categories whose xml-id cannot be resolved are silently omitted."""
        obj_data = self.pool.get('ir.model.data')
        obj_financial_report = self.pool.get('account.financial.report')
        financial_report_ref = {}
        for key, financial_report in [
                    ('asset','account_financial_report_assets0'),
                    ('liability','account_financial_report_liability0'),
                    ('income','account_financial_report_income0'),
                    ('expense','account_financial_report_expense0'),
                ]:
            try:
                financial_report_ref[key] = obj_financial_report.browse(cr, uid,
                    obj_data.get_object_reference(cr, uid, 'account', financial_report)[1],
                    context=context)
            except ValueError:
                pass
        return financial_report_ref
    def _get_current_report_type(self, cr, uid, ids, name, arg, context=None):
        """Function-field getter for 'report_type': derive the category from
        which financial report currently lists this account type in its
        account_type_ids; 'none' when it appears in none of them."""
        res = {}
        financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
        for record in self.browse(cr, uid, ids, context=context):
            res[record.id] = 'none'
            for key, financial_report in financial_report_ref.items():
                list_ids = [x.id for x in financial_report.account_type_ids]
                if record.id in list_ids:
                    res[record.id] = key
        return res
    def _save_report_type(self, cr, uid, account_type_id, field_name, field_value, arg, context=None):
        """Function-field inverse for 'report_type': detach the account type
        from every BS/P&L financial report it belongs to, then (unless the
        new value is 'none') attach it to the report matching field_value."""
        field_value = field_value or 'none'
        obj_financial_report = self.pool.get('account.financial.report')
        #unlink if it exists somewhere in the financial reports related to BS or PL
        financial_report_ref = self._get_financial_report_ref(cr, uid, context=context)
        for key, financial_report in financial_report_ref.items():
            list_ids = [x.id for x in financial_report.account_type_ids]
            if account_type_id in list_ids:
                obj_financial_report.write(cr, uid, [financial_report.id], {'account_type_ids': [(3, account_type_id)]})
        #write it in the good place
        if field_value != 'none':
            return obj_financial_report.write(cr, uid, [financial_report_ref[field_value].id], {'account_type_ids': [(4, account_type_id)]})
    _columns = {
        'name': fields.char('Account Type', size=64, required=True, translate=True),
        'code': fields.char('Code', size=32, required=True, select=True),
        'close_method': fields.selection([('none', 'None'), ('balance', 'Balance'), ('detail', 'Detail'), ('unreconciled', 'Unreconciled')], 'Deferral Method', required=True, help="""Set here the method that will be used to generate the end of year journal entries for all the accounts of this type.

 'None' means that nothing will be done.
 'Balance' will generally be used for cash accounts.
 'Detail' will copy each existing journal item of the previous year, even the reconciled ones.
 'Unreconciled' will copy only the journal items that were unreconciled on the first day of the new fiscal year."""),
        'report_type': fields.function(_get_current_report_type, fnct_inv=_save_report_type, type='selection', string='P&L / BS Category', store=True,
            selection= [('none','/'),
                        ('income', _('Profit & Loss (Income account)')),
                        ('expense', _('Profit & Loss (Expense account)')),
                        ('asset', _('Balance Sheet (Asset account)')),
                        ('liability', _('Balance Sheet (Liability account)'))], help="This field is used to generate legal reports: profit and loss, balance sheet.", required=True),
        'note': fields.text('Description'),
    }
    _defaults = {
        'close_method': 'none',
        'report_type': 'none',
    }
    _order = "code"
#----------------------------------------------------------
# Accounts
#----------------------------------------------------------

class account_tax(osv.osv):
    # Early registration of the 'account.tax' model name so that models
    # declared above the full tax implementation can reference it in
    # relational fields (e.g. account.account's 'tax_ids' many2many).
    # NOTE(review): presumably the complete definition appears further down
    # in this file — confirm before relying on this stub elsewhere.
    _name = 'account.tax'
class account_account(osv.osv):
    """General ledger account (one node of the chart of accounts).

    The hierarchy is stored with a parent_left/parent_right nested-set index
    (``_parent_store``); balance/debit/credit are function fields computed
    from account.move.line and aggregated over regular and consolidated
    children.
    """
    _order = "parent_left"
    _parent_order = "code"
    _name = "account.account"
    _description = "Account"
    _parent_store = True

    def search(self, cr, uid, args, offset=0, limit=None, order=None,
            context=None, count=False):
        """Extend search with two special behaviours: a 'journal_id' domain
        leaf is rewritten into the set of accounts allowed on that journal
        (or into a type filter when the journal has no controls), and a
        'consolidate_children' context key appends the consolidated children
        of context['account_id'] to the result ids."""
        if context is None:
            context = {}
        pos = 0

        while pos < len(args):

            if args[pos][0] == 'code' and args[pos][1] in ('like', 'ilike') and args[pos][2]:
                args[pos] = ('code', '=like', tools.ustr(args[pos][2].replace('%', ''))+'%')
            if args[pos][0] == 'journal_id':
                if not args[pos][2]:
                    del args[pos]
                    continue
                jour = self.pool.get('account.journal').browse(cr, uid, args[pos][2], context=context)
                if (not (jour.account_control_ids or jour.type_control_ids)) or not args[pos][2]:
                    # no explicit controls on the journal: only exclude
                    # non-postable account types
                    args[pos] = ('type','not in',('consolidation','view'))
                    continue
                ids3 = map(lambda x: x.id, jour.type_control_ids)
                ids1 = super(account_account, self).search(cr, uid, [('user_type', 'in', ids3)])
                ids1 += map(lambda x: x.id, jour.account_control_ids)
                args[pos] = ('id', 'in', ids1)
            pos += 1

        if context and context.has_key('consolidate_children'): #add consolidated children of accounts
            ids = super(account_account, self).search(cr, uid, args, offset, limit,
                order, context=context, count=count)
            for consolidate_child in self.browse(cr, uid, context['account_id'], context=context).child_consol_ids:
                ids.append(consolidate_child.id)
            return ids

        return super(account_account, self).search(cr, uid, args, offset, limit,
                order, context=context, count=count)

    def _get_children_and_consol(self, cr, uid, ids, context=None):
        #this function search for all the children and all consolidated children (recursively) of the given account ids
        ids2 = self.search(cr, uid, [('parent_id', 'child_of', ids)], context=context)
        ids3 = []
        for rec in self.browse(cr, uid, ids2, context=context):
            for child in rec.child_consol_ids:
                ids3.append(child.id)
        if ids3:
            ids3 = self._get_children_and_consol(cr, uid, ids3, context)
        return ids2 + ids3

    def __compute(self, cr, uid, ids, field_names, arg=None, context=None,
                  query='', query_params=()):
        """ compute the balance, debit and/or credit for the provided
        account ids
        Arguments:
        `ids`: account ids
        `field_names`: the fields to compute (a list of any of
                       'balance', 'debit' and 'credit')
        `arg`: unused fields.function stuff
        `query`: additional query filter (as a string)
        `query_params`: parameters for the provided query string
                        (__compute will handle their escaping) as a
                        tuple
        """
        mapping = {
            'balance': "COALESCE(SUM(l.debit),0) - COALESCE(SUM(l.credit), 0) as balance",
            'debit': "COALESCE(SUM(l.debit), 0) as debit",
            'credit': "COALESCE(SUM(l.credit), 0) as credit",
            # by convention, foreign_balance is 0 when the account has no secondary currency, because the amounts may be in different currencies
            'foreign_balance': "(SELECT CASE WHEN currency_id IS NULL THEN 0 ELSE COALESCE(SUM(l.amount_currency), 0) END FROM account_account WHERE id IN (l.account_id)) as foreign_balance",
        }
        #get all the necessary accounts
        children_and_consolidated = self._get_children_and_consol(cr, uid, ids, context=context)
        #compute for each account the balance/debit/credit from the move lines
        accounts = {}
        res = {}
        null_result = dict((fn, 0.0) for fn in field_names)
        if children_and_consolidated:
            aml_query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)

            wheres = [""]
            if query.strip():
                wheres.append(query.strip())
            if aml_query.strip():
                wheres.append(aml_query.strip())
            filters = " AND ".join(wheres)
            # IN might not work ideally in case there are too many
            # children_and_consolidated, in that case join on a
            # values() e.g.:
            # SELECT l.account_id as id FROM account_move_line l
            # INNER JOIN (VALUES (id1), (id2), (id3), ...) AS tmp (id)
            # ON l.account_id = tmp.id
            # or make _get_children_and_consol return a query and join on that
            request = ("SELECT l.account_id as id, " +\
                       ', '.join(mapping.values()) +
                       " FROM account_move_line l" \
                       " WHERE l.account_id IN %s " \
                            + filters +
                       " GROUP BY l.account_id")
            params = (tuple(children_and_consolidated),) + query_params
            cr.execute(request, params)

            for row in cr.dictfetchall():
                accounts[row['id']] = row

            # consolidate accounts with direct children
            children_and_consolidated.reverse()
            brs = list(self.browse(cr, uid, children_and_consolidated, context=context))
            sums = {}
            currency_obj = self.pool.get('res.currency')
            while brs:
                current = brs.pop(0)
#                can_compute = True
#                for child in current.child_id:
#                    if child.id not in sums:
#                        can_compute = False
#                        try:
#                            brs.insert(0, brs.pop(brs.index(child)))
#                        except ValueError:
#                            brs.insert(0, child)
#                if can_compute:
                for fn in field_names:
                    sums.setdefault(current.id, {})[fn] = accounts.get(current.id, {}).get(fn, 0.0)
                    for child in current.child_id:
                        if child.company_id.currency_id.id == current.company_id.currency_id.id:
                            sums[current.id][fn] += sums[child.id][fn]
                        else:
                            sums[current.id][fn] += currency_obj.compute(cr, uid, child.company_id.currency_id.id, current.company_id.currency_id.id, sums[child.id][fn], context=context)

                # as we have to relay on values computed before this is calculated separately than previous fields
                if current.currency_id and current.exchange_rate and \
                            ('adjusted_balance' in field_names or 'unrealized_gain_loss' in field_names):
                    # Computing Adjusted Balance and Unrealized Gains and losses
                    # Adjusted Balance = Foreign Balance / Exchange Rate
                    # Unrealized Gains and losses = Adjusted Balance - Balance
                    adj_bal = sums[current.id].get('foreign_balance', 0.0) / current.exchange_rate
                    sums[current.id].update({'adjusted_balance': adj_bal, 'unrealized_gain_loss': adj_bal - sums[current.id].get('balance', 0.0)})

            for id in ids:
                res[id] = sums.get(id, null_result)
        else:
            for id in ids:
                res[id] = null_result
        return res

    def _get_company_currency(self, cr, uid, ids, field_name, arg, context=None):
        """Getter for 'company_currency_id': (currency id, symbol) of each
        account's company currency."""
        result = {}
        for rec in self.browse(cr, uid, ids, context=context):
            result[rec.id] = (rec.company_id.currency_id.id,rec.company_id.currency_id.symbol)
        return result

    def _get_child_ids(self, cr, uid, ids, field_name, arg, context=None):
        """Getter for 'child_id': union of direct children and consolidated
        children, without duplicates."""
        result = {}
        for record in self.browse(cr, uid, ids, context=context):
            if record.child_parent_ids:
                result[record.id] = [x.id for x in record.child_parent_ids]
            else:
                result[record.id] = []

            if record.child_consol_ids:
                for acc in record.child_consol_ids:
                    if acc.id not in result[record.id]:
                        result[record.id].append(acc.id)

        return result

    def _get_level(self, cr, uid, ids, field_name, arg, context=None):
        """Getter for 'level': depth of the account in the hierarchy
        (0 for a root account)."""
        res = {}
        for account in self.browse(cr, uid, ids, context=context):
            #we may not know the level of the parent at the time of computation, so we
            # can't simply do res[account.id] = account.parent_id.level + 1
            level = 0
            parent = account.parent_id
            while parent:
                level += 1
                parent = parent.parent_id
            res[account.id] = level
        return res

    def _set_credit_debit(self, cr, uid, account_id, name, value, arg, context=None):
        """Inverse for 'credit'/'debit': materialize the requested initial
        balance as an 'Opening Balance' journal item in the centralized
        opening journal and opening/closing period; updates the existing
        opening line when one is found, otherwise creates it."""
        if context.get('config_invisible', True):
            return True

        account = self.browse(cr, uid, account_id, context=context)
        diff = value - getattr(account,name)
        if not diff:
            return True

        journal_obj = self.pool.get('account.journal')
        jids = journal_obj.search(cr, uid, [('type','=','situation'),('centralisation','=',1),('company_id','=',account.company_id.id)], context=context)
        if not jids:
            raise osv.except_osv(_('Error!'),_("You need an Opening journal with centralisation checked to set the initial balance."))

        period_obj = self.pool.get('account.period')
        pids = period_obj.search(cr, uid, [('special','=',True),('company_id','=',account.company_id.id)], context=context)
        if not pids:
            raise osv.except_osv(_('Error!'),_("There is no opening/closing period defined, please create one to set the initial balance."))

        move_obj = self.pool.get('account.move.line')
        move_id = move_obj.search(cr, uid, [
            ('journal_id','=',jids[0]),
            ('period_id','=',pids[0]),
            ('account_id','=', account_id),
            (name,'>', 0.0),
            ('name','=', _('Opening Balance'))
        ], context=context)
        if move_id:
            move = move_obj.browse(cr, uid, move_id[0], context=context)
            move_obj.write(cr, uid, move_id[0], {
                name: diff+getattr(move,name)
            }, context=context)
        else:
            if diff<0.0:
                raise osv.except_osv(_('Error!'),_("Unable to adapt the initial balance (negative value)."))
            nameinv = (name=='credit' and 'debit') or 'credit'
            move_id = move_obj.create(cr, uid, {
                'name': _('Opening Balance'),
                'account_id': account_id,
                'journal_id': jids[0],
                'period_id': pids[0],
                name: diff,
                nameinv: 0.0
            }, context=context)
        return True

    _columns = {
        'name': fields.char('Name', size=256, required=True, select=True),
        'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
        'code': fields.char('Code', size=64, required=True, select=1),
        'type': fields.selection([
            ('view', 'View'),
            ('other', 'Regular'),
            ('receivable', 'Receivable'),
            ('payable', 'Payable'),
            ('liquidity','Liquidity'),
            ('consolidation', 'Consolidation'),
            ('closed', 'Closed'),
        ], 'Internal Type', required=True, help="The 'Internal Type' is used for features available on "\
            "different types of accounts: view can not have journal items, consolidation are accounts that "\
            "can have children accounts for multi-company consolidations, payable/receivable are for "\
            "partners accounts (for debit/credit computations), closed for depreciated accounts."),
        'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
            help="Account Type is used for information purpose, to generate "
              "country-specific legal reports, and set the rules to close a fiscal year and generate opening entries."),
        'financial_report_ids': fields.many2many('account.financial.report', 'account_account_financial_report', 'account_id', 'report_line_id', 'Financial Reports'),
        'parent_id': fields.many2one('account.account', 'Parent', ondelete='cascade', domain=[('type','=','view')]),
        'child_parent_ids': fields.one2many('account.account','parent_id','Children'),
        'child_consol_ids': fields.many2many('account.account', 'account_account_consol_rel', 'child_id', 'parent_id', 'Consolidated Children'),
        'child_id': fields.function(_get_child_ids, type='many2many', relation="account.account", string="Child Accounts"),
        'balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Balance', multi='balance'),
        'credit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Credit', multi='balance'),
        'debit': fields.function(__compute, fnct_inv=_set_credit_debit, digits_compute=dp.get_precision('Account'), string='Debit', multi='balance'),
        'foreign_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Foreign Balance', multi='balance',
                                           help="Total amount (in Secondary currency) for transactions held in secondary currency for this account."),
        'adjusted_balance': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Adjusted Balance', multi='balance',
                                            help="Total amount (in Company currency) for transactions held in secondary currency for this account."),
        'unrealized_gain_loss': fields.function(__compute, digits_compute=dp.get_precision('Account'), string='Unrealized Gain or Loss', multi='balance',
                                                help="Value of Loss or Gain due to changes in exchange rate when doing multi-currency transactions."),
        'reconcile': fields.boolean('Allow Reconciliation', help="Check this box if this account allows reconciliation of journal items."),
        'exchange_rate': fields.related('currency_id', 'rate', type='float', string='Exchange Rate', digits=(12,6)),
        'shortcut': fields.char('Shortcut', size=12),
        'tax_ids': fields.many2many('account.tax', 'account_account_tax_default_rel',
            'account_id', 'tax_id', 'Default Taxes'),
        'note': fields.text('Internal Notes'),
        'company_currency_id': fields.function(_get_company_currency, type='many2one', relation='res.currency', string='Company Currency'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'active': fields.boolean('Active', select=2, help="If the active field is set to False, it will allow you to hide the account without removing it."),

        'parent_left': fields.integer('Parent Left', select=1),
        'parent_right': fields.integer('Parent Right', select=1),
        'currency_mode': fields.selection([('current', 'At Date'), ('average', 'Average Rate')], 'Outgoing Currencies Rate',
            help=
            'This will select how the current currency rate for outgoing transactions is computed. '\
            'In most countries the legal method is "average" but only a few software systems are able to '\
            'manage this. So if you import from another software system you may have to use the rate at date. ' \
            'Incoming transactions always use the rate at date.', \
            required=True),
        'level': fields.function(_get_level, string='Level', method=True, type='integer',
             store={
                    'account.account': (_get_children_and_consol, ['level', 'parent_id'], 10),
                   }),
    }

    _defaults = {
        'type': 'other',
        'reconcile': False,
        'active': True,
        'currency_mode': 'current',
        'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'account.account', context=c),
    }

    def _check_recursion(self, cr, uid, ids, context=None):
        """Constraint: reject loops through parent_id or through the
        consolidation relation (account_account_consol_rel), following
        consolidated children and their regular descendants level by level."""
        obj_self = self.browse(cr, uid, ids[0], context=context)
        p_id = obj_self.parent_id and obj_self.parent_id.id
        if (obj_self in obj_self.child_consol_ids) or (p_id and (p_id is obj_self.id)):
            return False
        while(ids):
            cr.execute('SELECT DISTINCT child_id '\
                       'FROM account_account_consol_rel '\
                       'WHERE parent_id IN %s', (tuple(ids),))
            child_ids = map(itemgetter(0), cr.fetchall())
            c_ids = child_ids
            if (p_id and (p_id in c_ids)) or (obj_self.id in c_ids):
                return False
            while len(c_ids):
                s_ids = self.search(cr, uid, [('parent_id', 'in', c_ids)])
                if p_id and (p_id in s_ids):
                    return False
                c_ids = s_ids
            ids = child_ids
        return True

    def _check_type(self, cr, uid, ids, context=None):
        """Constraint: only 'view' and 'consolidation' accounts may have
        children."""
        if context is None:
            context = {}
        accounts = self.browse(cr, uid, ids, context=context)
        for account in accounts:
            if account.child_id and account.type not in ('view', 'consolidation'):
                return False
        return True

    def _check_account_type(self, cr, uid, ids, context=None):
        """Constraint: payable/receivable accounts require an account type
        whose deferral method is 'unreconciled'."""
        for account in self.browse(cr, uid, ids, context=context):
            if account.type in ('receivable', 'payable') and account.user_type.close_method != 'unreconciled':
                return False
        return True

    def _check_company_account(self, cr, uid, ids, context=None):
        """Constraint: an account must belong to the same company as its
        parent."""
        for account in self.browse(cr, uid, ids, context=context):
            if account.parent_id:
                if account.company_id != account.parent_id.company_id:
                    return False
        return True

    _constraints = [
        (_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id']),
        (_check_type, 'Configuration Error!\nYou cannot define children to an account with internal type different of "View".', ['type']),
        (_check_account_type, 'Configuration Error!\nYou cannot select an account type with a deferral method different of "Unreconciled" for accounts with internal type "Payable/Receivable".', ['user_type','type']),
        (_check_company_account, 'Error!\nYou cannot create an account which has parent account of different company.', ['parent_id']),
    ]
    _sql_constraints = [
        ('code_company_uniq', 'unique (code,company_id)', 'The code of the account must be unique per company !')
    ]
    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        """Extend name_search with the 'partner:<id>' and 'type:<type>'
        prefixes, prefix-matching on code/shortcut, and a fallback that
        splits 'CODE NAME' input into separate code and name criteria."""
        if not args:
            args = []
        args = args[:]
        ids = []
        try:
            if name and str(name).startswith('partner:'):
                part_id = int(name.split(':')[1])
                part = self.pool.get('res.partner').browse(cr, user, part_id, context=context)
                args += [('id', 'in', (part.property_account_payable.id, part.property_account_receivable.id))]
                name = False
            if name and str(name).startswith('type:'):
                type = name.split(':')[1]
                args += [('type', '=', type)]
                name = False
        except:
            pass
        if name:
            if operator not in expression.NEGATIVE_TERM_OPERATORS:
                ids = self.search(cr, user, ['|', ('code', '=like', name+"%"), '|', ('shortcut', '=', name), ('name', operator, name)]+args, limit=limit)
                if not ids and len(name.split()) >= 2:
                    #Separating code and name of account for searching
                    operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
                    ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2)]+ args, limit=limit)
            else:
                ids = self.search(cr, user, ['&','!', ('code', '=like', name+"%"), ('name', operator, name)]+args, limit=limit)
                # as negation want to restric, do if already have results
                if ids and len(name.split()) >= 2:
                    operand1,operand2 = name.split(' ',1) #name can contain spaces e.g. OpenERP S.A.
                    ids = self.search(cr, user, [('code', operator, operand1), ('name', operator, operand2), ('id', 'in', ids)]+ args, limit=limit)
        else:
            ids = self.search(cr, user, args, context=context, limit=limit)
        return self.name_get(cr, user, ids, context=context)

    def name_get(self, cr, uid, ids, context=None):
        """Display name is 'CODE NAME' when a code is set, plain name
        otherwise."""
        if not ids:
            return []
        if isinstance(ids, (int, long)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['name', 'code'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['code']:
                name = record['code'] + ' ' + name
            res.append((record['id'], name))
        return res

    def copy(self, cr, uid, id, default=None, context=None, done_list=None, local=False):
        """Duplicate an account and, recursively, its children; ``done_list``
        guards against copying the same account twice across the recursion,
        and the copied code is suffixed with '(copy)'."""
        default = {} if default is None else default.copy()
        if done_list is None:
            done_list = []
        account = self.browse(cr, uid, id, context=context)
        new_child_ids = []
        default.update(code=_("%s (copy)") % (account['code'] or ''))
        if not local:
            done_list = []
        if account.id in done_list:
            return False
        done_list.append(account.id)
        if account:
            for child in account.child_id:
                child_ids = self.copy(cr, uid, child.id, default, context=context, done_list=done_list, local=True)
                if child_ids:
                    new_child_ids.append(child_ids)
            default['child_parent_ids'] = [(6, 0, new_child_ids)]
        else:
            default['child_parent_ids'] = False
        return super(account_account, self).copy(cr, uid, id, default, context=context)

    def _check_moves(self, cr, uid, ids, method, context=None):
        """Raise when the account (or any descendant) carries journal items,
        or is referenced as a property on a partner; ``method`` selects the
        error wording ('write' for deactivation, 'unlink' for deletion)."""
        line_obj = self.pool.get('account.move.line')
        account_ids = self.search(cr, uid, [('id', 'child_of', ids)])
        if line_obj.search(cr, uid, [('account_id', 'in', account_ids)]):
            if method == 'write':
                raise osv.except_osv(_('Error!'), _('You cannot deactivate an account that contains journal items.'))
            elif method == 'unlink':
                raise osv.except_osv(_('Error!'), _('You cannot remove an account that contains journal items.'))
        #Checking whether the account is set as a property to any Partner or not
        value = 'account.account,' + str(ids[0])
        partner_prop_acc = self.pool.get('ir.property').search(cr, uid, [('value_reference','=',value)], context=context)
        if partner_prop_acc:
            raise osv.except_osv(_('Warning!'), _('You cannot remove/deactivate an account which is set on a customer or supplier.'))
        return True

    def _check_allow_type_change(self, cr, uid, ids, new_type, context=None):
        """Forbid switching away from 'closed' or into 'consolidation'/'view'
        when the account (or a descendant) already has journal items."""
        restricted_groups = ['consolidation','view']
        line_obj = self.pool.get('account.move.line')
        for account in self.browse(cr, uid, ids, context=context):
            old_type = account.type
            account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])])
            if line_obj.search(cr, uid, [('account_id', 'in', account_ids)]):
                #Check for 'Closed' type
                if old_type == 'closed' and new_type !='closed':
                    raise osv.except_osv(_('Warning!'), _("You cannot change the type of account from 'Closed' to any other type as it contains journal items!"))
                # Forbid to change an account type for restricted_groups as it contains journal items (or if one of its children does)
                if (new_type in restricted_groups):
                    raise osv.except_osv(_('Warning!'), _("You cannot change the type of account to '%s' type as it contains journal items!") % (new_type,))

        return True

    # For legal reason (forbiden to modify journal entries which belongs to a closed fy or period), Forbid to modify
    # the code of an account if journal entries have been already posted on this account. This cannot be simply
    # 'configurable' since it can lead to a lack of confidence in OpenERP and this is what we want to change.
    def _check_allow_code_change(self, cr, uid, ids, context=None):
        line_obj = self.pool.get('account.move.line')
        for account in self.browse(cr, uid, ids, context=context):
            account_ids = self.search(cr, uid, [('id', 'child_of', [account.id])], context=context)
            if line_obj.search(cr, uid, [('account_id', 'in', account_ids)], context=context):
                raise osv.except_osv(_('Warning !'), _("You cannot change the code of account which contains journal items!"))
        return True

    def write(self, cr, uid, ids, vals, context=None):
        """Guarded write: block company changes, deactivation, type changes
        and code changes when the affected accounts already carry journal
        items (delegating to the _check_* helpers above)."""
        if context is None:
            context = {}

        if not ids:
            return True
        if isinstance(ids, (int, long)):
            ids = [ids]

        # Dont allow changing the company_id when account_move_line already exist
        if 'company_id' in vals:
            move_lines = self.pool.get('account.move.line').search(cr, uid, [('account_id', 'in', ids)])
            if move_lines:
                # Allow the write if the value is the same
                for i in [i['company_id'][0] for i in self.read(cr,uid,ids,['company_id'])]:
                    if vals['company_id']!=i:
                        raise osv.except_osv(_('Warning!'), _('You cannot change the owner company of an account that already contains journal items.'))
        if 'active' in vals and not vals['active']:
            self._check_moves(cr, uid, ids, "write", context=context)
        if 'type' in vals.keys():
            self._check_allow_type_change(cr, uid, ids, vals['type'], context=context)
        if 'code' in vals.keys():
            self._check_allow_code_change(cr, uid, ids, context=context)
        return super(account_account, self).write(cr, uid, ids, vals, context=context)

    def unlink(self, cr, uid, ids, context=None):
        """Deletion guard: refuse to remove accounts that carry journal items
        or are used as a partner property."""
        self._check_moves(cr, uid, ids, "unlink", context=context)
        return super(account_account, self).unlink(cr, uid, ids, context=context)
class account_journal(osv.osv):
_name = "account.journal"
_description = "Journal"
_columns = {
'with_last_closing_balance' : fields.boolean('Opening With Last Closing Balance'),
'name': fields.char('Journal Name', size=64, required=True),
'code': fields.char('Code', size=5, required=True, help="The code will be displayed on reports."),
'type': fields.selection([('sale', 'Sale'),('sale_refund','Sale Refund'), ('purchase', 'Purchase'), ('purchase_refund','Purchase Refund'), ('cash', 'Cash'), ('bank', 'Bank and Checks'), ('general', 'General'), ('situation', 'Opening/Closing Situation')], 'Type', size=32, required=True,
help="Select 'Sale' for customer invoices journals."\
" Select 'Purchase' for supplier invoices journals."\
" Select 'Cash' or 'Bank' for journals that are used in customer or supplier payments."\
" Select 'General' for miscellaneous operations journals."\
" Select 'Opening/Closing Situation' for entries generated for new fiscal years."),
'type_control_ids': fields.many2many('account.account.type', 'account_journal_type_rel', 'journal_id','type_id', 'Type Controls', domain=[('code','<>','view'), ('code', '<>', 'closed')]),
'account_control_ids': fields.many2many('account.account', 'account_account_type_rel', 'journal_id','account_id', 'Account', domain=[('type','<>','view'), ('type', '<>', 'closed')]),
'default_credit_account_id': fields.many2one('account.account', 'Default Credit Account', domain="[('type','!=','view')]", help="It acts as a default account for credit amount"),
'default_debit_account_id': fields.many2one('account.account', 'Default Debit Account', domain="[('type','!=','view')]", help="It acts as a default account for debit amount"),
'centralisation': fields.boolean('Centralized Counterpart', help="Check this box to determine that each entry of this journal won't create a new counterpart but will share the same counterpart. This is used in fiscal year closing."),
'update_posted': fields.boolean('Allow Cancelling Entries', help="Check this box if you want to allow the cancellation the entries related to this journal or of the invoice related to this journal"),
'group_invoice_lines': fields.boolean('Group Invoice Lines', help="If this box is checked, the system will try to group the accounting lines when generating them from invoices."),
'sequence_id': fields.many2one('ir.sequence', 'Entry Sequence', help="This field contains the information related to the numbering of the journal entries of this journal.", required=True),
'user_id': fields.many2one('res.users', 'User', help="The user responsible for this journal"),
'groups_id': fields.many2many('res.groups', 'account_journal_group_rel', 'journal_id', 'group_id', 'Groups'),
'currency': fields.many2one('res.currency', 'Currency', help='The currency used to enter statement'),
'entry_posted': fields.boolean('Autopost Created Moves', help='Check this box to automatically post entries of this journal. Note that legally, some entries may be automatically posted when the source document is validated (Invoices), whatever the status of this field.'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, help="Company related to this journal"),
'allow_date':fields.boolean('Check Date in Period', help= 'If checked, the entry won\'t be created if the entry date is not included into the selected period'),
'profit_account_id' : fields.many2one('account.account', 'Profit Account'),
'loss_account_id' : fields.many2one('account.account', 'Loss Account'),
'internal_account_id' : fields.many2one('account.account', 'Internal Transfers Account', select=1),
'cash_control' : fields.boolean('Cash Control', help='If you want the journal should be control at opening/closing, check this option'),
}
_defaults = {
'cash_control' : False,
'with_last_closing_balance' : False,
'user_id': lambda self, cr, uid, context: uid,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
_sql_constraints = [
('code_company_uniq', 'unique (code, company_id)', 'The code of the journal must be unique per company !'),
('name_company_uniq', 'unique (name, company_id)', 'The name of the journal must be unique per company !'),
]
_order = 'code'
    def _check_currency(self, cr, uid, ids, context=None):
        """Constraint helper: when a journal has its own currency, its default
        debit/credit accounts (when set) must carry that same currency."""
        for journal in self.browse(cr, uid, ids, context=context):
            if journal.currency:
                if journal.default_credit_account_id and not journal.default_credit_account_id.currency_id.id == journal.currency.id:
                    return False
                if journal.default_debit_account_id and not journal.default_debit_account_id.currency_id.id == journal.currency.id:
                    return False
        return True
    _constraints = [
        (_check_currency, 'Configuration error!\nThe currency chosen should be shared by the default accounts too.', ['currency','default_debit_account_id','default_credit_account_id']),
    ]
def copy(self, cr, uid, id, default=None, context=None, done_list=None, local=False):
default = {} if default is None else default.copy()
if done_list is None:
done_list = []
journal = self.browse(cr, uid, id, context=context)
default.update(
code=_("%s (copy)") % (journal['code'] or ''),
name=_("%s (copy)") % (journal['name'] or ''),
sequence_id=False)
return super(account_journal, self).copy(cr, uid, id, default, context=context)
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
if isinstance(ids, (int, long)):
ids = [ids]
for journal in self.browse(cr, uid, ids, context=context):
if 'company_id' in vals and journal.company_id.id != vals['company_id']:
move_lines = self.pool.get('account.move.line').search(cr, uid, [('journal_id', 'in', ids)])
if move_lines:
raise osv.except_osv(_('Warning!'), _('This journal already contains items, therefore you cannot modify its company field.'))
return super(account_journal, self).write(cr, uid, ids, vals, context=context)
def create_sequence(self, cr, uid, vals, context=None):
""" Create new no_gap entry sequence for every new Joural
"""
# in account.journal code is actually the prefix of the sequence
# whereas ir.sequence code is a key to lookup global sequences.
prefix = vals['code'].upper()
seq = {
'name': vals['name'],
'implementation':'no_gap',
'prefix': prefix + "/%(year)s/",
'padding': 4,
'number_increment': 1
}
if 'company_id' in vals:
seq['company_id'] = vals['company_id']
return self.pool.get('ir.sequence').create(cr, uid, seq)
def create(self, cr, uid, vals, context=None):
if not 'sequence_id' in vals or not vals['sequence_id']:
# if we have the right to create a journal, we should be able to
# create it's sequence.
vals.update({'sequence_id': self.create_sequence(cr, SUPERUSER_ID, vals, context)})
return super(account_journal, self).create(cr, uid, vals, context)
def name_get(self, cr, user, ids, context=None):
"""
Returns a list of tupples containing id, name.
result format: {[(id, name), (id, name), ...]}
@param cr: A database cursor
@param user: ID of the user currently logged in
@param ids: list of ids for which name should be read
@param context: context arguments, like lang, time zone
@return: Returns a list of tupples containing id, name
"""
if not ids:
return []
if isinstance(ids, (int, long)):
ids = [ids]
result = self.browse(cr, user, ids, context=context)
res = []
for rs in result:
if rs.currency:
currency = rs.currency
else:
currency = rs.company_id.currency_id
name = "%s (%s)" % (rs.name, currency.name)
res += [(rs.id, name)]
return res
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('code', operator, name), ('name', operator, name)]
else:
domain = ['|', ('code', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
class account_fiscalyear(osv.osv):
    """Fiscal year: a dated frame that owns the accounting periods."""
    _name = "account.fiscalyear"
    _description = "Fiscal Year"
    _columns = {
        'name': fields.char('Fiscal Year', size=64, required=True),
        'code': fields.char('Code', size=6, required=True),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'date_start': fields.date('Start Date', required=True),
        'date_stop': fields.date('End Date', required=True),
        'period_ids': fields.one2many('account.period', 'fiscalyear_id', 'Periods'),
        'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True),
    }
    _defaults = {
        'state': 'draft',
        'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
    }
    _order = "date_start, id"
    def _check_duration(self, cr, uid, ids, context=None):
        """Constraint helper: end date must not precede start date."""
        obj_fy = self.browse(cr, uid, ids[0], context=context)
        if obj_fy.date_stop < obj_fy.date_start:
            return False
        return True
    _constraints = [
        (_check_duration, 'Error!\nThe start date of a fiscal year must precede its end date.', ['date_start','date_stop'])
    ]
    def create_period3(self, cr, uid, ids, context=None):
        """Shortcut: create quarterly (3-month) periods."""
        return self.create_period(cr, uid, ids, context, 3)
    def create_period(self, cr, uid, ids, context=None, interval=1):
        """Create the periods of the fiscal year(s).

        One special opening period (code "00/<year>") is created on the start
        date, then normal periods of `interval` months until date_stop; the
        last period is truncated at date_stop.
        """
        period_obj = self.pool.get('account.period')
        for fy in self.browse(cr, uid, ids, context=context):
            ds = datetime.strptime(fy.date_start, '%Y-%m-%d')
            # NOTE(review): date_start/date_stop receive a datetime object
            # here while the normal periods below pass strings — presumably
            # the ORM converts it; confirm.
            period_obj.create(cr, uid, {
                'name': "%s %s" % (_('Opening Period'), ds.strftime('%Y')),
                'code': ds.strftime('00/%Y'),
                'date_start': ds,
                'date_stop': ds,
                'special': True,
                'fiscalyear_id': fy.id,
            })
            while ds.strftime('%Y-%m-%d') < fy.date_stop:
                # period end: interval months ahead, minus one day
                de = ds + relativedelta(months=interval, days=-1)
                if de.strftime('%Y-%m-%d') > fy.date_stop:
                    de = datetime.strptime(fy.date_stop, '%Y-%m-%d')
                period_obj.create(cr, uid, {
                    'name': ds.strftime('%m/%Y'),
                    'code': ds.strftime('%m/%Y'),
                    'date_start': ds.strftime('%Y-%m-%d'),
                    'date_stop': de.strftime('%Y-%m-%d'),
                    'fiscalyear_id': fy.id,
                })
                ds = ds + relativedelta(months=interval)
        return True
    def find(self, cr, uid, dt=None, exception=True, context=None):
        """Return the first fiscal year id containing `dt`, or False."""
        res = self.finds(cr, uid, dt, exception, context=context)
        return res and res[0] or False
    def finds(self, cr, uid, dt=None, exception=True, context=None):
        """Return the ids of the fiscal years containing `dt` (default:
        today) for the context's company (default: the user's company).

        :raise osv.except_osv: when nothing matches and `exception` is True;
            otherwise return [].
        """
        if context is None: context = {}
        if not dt:
            dt = fields.date.context_today(self,cr,uid,context=context)
        args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
        if context.get('company_id', False):
            company_id = context['company_id']
        else:
            company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
        args.append(('company_id', '=', company_id))
        ids = self.search(cr, uid, args, context=context)
        if not ids:
            if exception:
                raise osv.except_osv(_('Error!'), _('There is no fiscal year defined for this date.\nPlease create one from the configuration of the accounting menu.'))
            else:
                return []
        return ids
    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
        """Match fiscal years on code OR name (AND for negative operators).
        NOTE(review): limit defaults to 80 here but 100 on the sibling
        name_search methods — confirm this is intentional."""
        if args is None:
            args = []
        if operator in expression.NEGATIVE_TERM_OPERATORS:
            domain = [('code', operator, name), ('name', operator, name)]
        else:
            domain = ['|', ('code', operator, name), ('name', operator, name)]
        ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
        return self.name_get(cr, user, ids, context=context)
class account_period(osv.osv):
    """Accounting period: a slice of a fiscal year that journal items are
    booked into; 'special' periods hold opening/closing entries and are
    allowed to overlap normal periods."""
    _name = "account.period"
    _description = "Account period"
    _columns = {
        'name': fields.char('Period Name', size=64, required=True),
        'code': fields.char('Code', size=12),
        # NOTE(review): size=12 is meaningless on a boolean field — presumably
        # ignored by the ORM; confirm and drop.
        'special': fields.boolean('Opening/Closing Period', size=12,
            help="These periods can overlap."),
        'date_start': fields.date('Start of Period', required=True, states={'done':[('readonly',True)]}),
        'date_stop': fields.date('End of Period', required=True, states={'done':[('readonly',True)]}),
        'fiscalyear_id': fields.many2one('account.fiscalyear', 'Fiscal Year', required=True, states={'done':[('readonly',True)]}, select=True),
        'state': fields.selection([('draft','Open'), ('done','Closed')], 'Status', readonly=True,
            help='When monthly periods are created. The status is \'Draft\'. At the end of monthly period it is in \'Done\' status.'),
        'company_id': fields.related('fiscalyear_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
    }
    _defaults = {
        'state': 'draft',
    }
    _order = "date_start, special desc"
    _sql_constraints = [
        ('name_company_uniq', 'unique(name, company_id)', 'The name of the period must be unique per company!'),
    ]
    def _check_duration(self,cr,uid,ids,context=None):
        """Constraint helper: a period must end on or after its start date."""
        obj_period = self.browse(cr, uid, ids[0], context=context)
        if obj_period.date_stop < obj_period.date_start:
            return False
        return True
    def _check_year_limit(self,cr,uid,ids,context=None):
        """Constraint helper: a normal period must lie inside its fiscal year
        and must not overlap another normal period of the same company."""
        for obj_period in self.browse(cr, uid, ids, context=context):
            if obj_period.special:
                continue
            if obj_period.fiscalyear_id.date_stop < obj_period.date_stop or \
               obj_period.fiscalyear_id.date_stop < obj_period.date_start or \
               obj_period.fiscalyear_id.date_start > obj_period.date_start or \
               obj_period.fiscalyear_id.date_start > obj_period.date_stop:
                return False
            pids = self.search(cr, uid, [('date_stop','>=',obj_period.date_start),('date_start','<=',obj_period.date_stop),('special','=',False),('id','<>',obj_period.id)])
            for period in self.browse(cr, uid, pids):
                # overlap only matters within the same company
                if period.fiscalyear_id.company_id.id==obj_period.fiscalyear_id.company_id.id:
                    return False
        return True
    _constraints = [
        (_check_duration, 'Error!\nThe duration of the Period(s) is/are invalid.', ['date_stop']),
        (_check_year_limit, 'Error!\nThe period is invalid. Either some periods are overlapping or the period\'s dates are not matching the scope of the fiscal year.', ['date_stop'])
    ]
    def next(self, cr, uid, period, step, context=None):
        """Return the id of the period `step` positions after `period` in
        start-date order, or False when there are not enough periods."""
        ids = self.search(cr, uid, [('date_start','>',period.date_start)])
        if len(ids)>=step:
            return ids[step-1]
        return False
    def find(self, cr, uid, dt=None, context=None):
        """Return the period ids containing date `dt` (default: today) for
        the context's company (default: the current user's company).

        Normal (non-special) periods are preferred unless the context key
        'account_period_prefer_normal' is explicitly False.

        :raise openerp.exceptions.RedirectWarning: when no period matches,
            redirecting the user to the fiscal-year configuration action.
        """
        if context is None: context = {}
        if not dt:
            dt = fields.date.context_today(self, cr, uid, context=context)
        args = [('date_start', '<=' ,dt), ('date_stop', '>=', dt)]
        if context.get('company_id', False):
            args.append(('company_id', '=', context['company_id']))
        else:
            company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
            args.append(('company_id', '=', company_id))
        result = []
        if context.get('account_period_prefer_normal', True):
            # look for non-special periods first, and fallback to all if no result is found
            result = self.search(cr, uid, args + [('special', '=', False)], context=context)
        if not result:
            result = self.search(cr, uid, args, context=context)
        if not result:
            model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_fiscalyear')
            msg = _('There is no period defined for this date: %s.\nPlease, go to Configuration/Periods and configure a fiscal year.') % dt
            raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
        return result
    def action_draft(self, cr, uid, ids, *args):
        """Reopen closed periods (and their journal-periods) by direct SQL,
        refusing when the owning fiscal year is already closed."""
        mode = 'draft'
        for period in self.browse(cr, uid, ids):
            if period.fiscalyear_id.state == 'done':
                raise osv.except_osv(_('Warning!'), _('You can not re-open a period which belongs to closed fiscal year'))
        cr.execute('update account_journal_period set state=%s where period_id in %s', (mode, tuple(ids),))
        cr.execute('update account_period set state=%s where id in %s', (mode, tuple(ids),))
        return True
    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
        """Match periods on code OR name (AND for negative operators)."""
        if args is None:
            args = []
        if operator in expression.NEGATIVE_TERM_OPERATORS:
            domain = [('code', operator, name), ('name', operator, name)]
        else:
            domain = ['|', ('code', operator, name), ('name', operator, name)]
        ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
        return self.name_get(cr, user, ids, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """Forbid moving a period to another company once it has items."""
        if 'company_id' in vals:
            move_lines = self.pool.get('account.move.line').search(cr, uid, [('period_id', 'in', ids)])
            if move_lines:
                raise osv.except_osv(_('Warning!'), _('This journal already contains items for this period, therefore you cannot modify its company field.'))
        return super(account_period, self).write(cr, uid, ids, vals, context=context)
    def build_ctx_periods(self, cr, uid, period_from_id, period_to_id):
        """Return the ids of all periods between two periods (inclusive),
        for use as the 'periods' context of reports.

        :raise osv.except_osv: if the two periods belong to different
            companies or the start period follows the end period.
        """
        if period_from_id == period_to_id:
            return [period_from_id]
        period_from = self.browse(cr, uid, period_from_id)
        period_date_start = period_from.date_start
        company1_id = period_from.company_id.id
        period_to = self.browse(cr, uid, period_to_id)
        period_date_stop = period_to.date_stop
        company2_id = period_to.company_id.id
        if company1_id != company2_id:
            raise osv.except_osv(_('Error!'), _('You should choose the periods that belong to the same company.'))
        if period_date_start > period_date_stop:
            raise osv.except_osv(_('Error!'), _('Start period should precede then end period.'))
        # /!\ We do not include a criterion on the company_id field below, to allow producing consolidated reports
        # on multiple companies. It will only work when start/end periods are selected and no fiscal year is chosen.
        #for period from = january, we want to exclude the opening period (but it has same date_from, so we have to check if period_from is special or not to include that clause or not in the search).
        if period_from.special:
            return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop)])
        return self.search(cr, uid, [('date_start', '>=', period_date_start), ('date_stop', '<=', period_date_stop), ('special', '=', False)])
class account_journal_period(osv.osv):
    """Link object between a journal and a period, carrying a working state
    used by period-closing workflows."""
    _name = "account.journal.period"
    _description = "Journal Period"
    def _icon_get(self, cr, uid, ids, field_name, arg=None, context=None):
        """Function field: map the record state to a GTK stock icon name."""
        result = {}.fromkeys(ids, 'STOCK_NEW')
        for r in self.read(cr, uid, ids, ['state']):
            result[r['id']] = {
                'draft': 'STOCK_NEW',
                'printed': 'STOCK_PRINT_PREVIEW',
                'done': 'STOCK_DIALOG_AUTHENTICATION',
            }.get(r['state'], 'STOCK_NEW')
        return result
    _columns = {
        'name': fields.char('Journal-Period Name', size=64, required=True),
        'journal_id': fields.many2one('account.journal', 'Journal', required=True, ondelete="cascade"),
        'period_id': fields.many2one('account.period', 'Period', required=True, ondelete="cascade"),
        'icon': fields.function(_icon_get, string='Icon', type='char', size=32),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the journal period without removing it."),
        'state': fields.selection([('draft','Draft'), ('printed','Printed'), ('done','Done')], 'Status', required=True, readonly=True,
            help='When journal period is created. The status is \'Draft\'. If a report is printed it comes to \'Printed\' status. When all transactions are done, it comes in \'Done\' status.'),
        'fiscalyear_id': fields.related('period_id', 'fiscalyear_id', string='Fiscal Year', type='many2one', relation='account.fiscalyear'),
        'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True)
    }
    def _check(self, cr, uid, ids, context=None):
        """Refuse modification/deletion when the (journal, period) pair
        already has journal items booked on it."""
        for obj in self.browse(cr, uid, ids, context=context):
            cr.execute('select * from account_move_line where journal_id=%s and period_id=%s limit 1', (obj.journal_id.id, obj.period_id.id))
            res = cr.fetchall()
            if res:
                raise osv.except_osv(_('Error!'), _('You cannot modify/delete a journal with entries for this period.'))
        return True
    def write(self, cr, uid, ids, vals, context=None):
        # guard against editing a journal-period that already has entries
        self._check(cr, uid, ids, context=context)
        return super(account_journal_period, self).write(cr, uid, ids, vals, context=context)
    def create(self, cr, uid, vals, context=None):
        """Create a journal-period, inheriting the state of its period."""
        period_id = vals.get('period_id',False)
        if period_id:
            period = self.pool.get('account.period').browse(cr, uid, period_id, context=context)
            vals['state']=period.state
        return super(account_journal_period, self).create(cr, uid, vals, context)
    def unlink(self, cr, uid, ids, context=None):
        # guard against deleting a journal-period that already has entries
        self._check(cr, uid, ids, context=context)
        return super(account_journal_period, self).unlink(cr, uid, ids, context=context)
    _defaults = {
        'state': 'draft',
        'active': True,
    }
    _order = "period_id"
class account_fiscalyear(osv.osv):
    """Extension of account.fiscalyear: remembers the journal-period that
    received the end-of-year closing entries."""
    _inherit = "account.fiscalyear"
    _description = "Fiscal Year"
    _columns = {
        'end_journal_period_id': fields.many2one('account.journal.period', 'End of Year Entries Journal', readonly=True),
    }

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a fiscal year without its periods and without the
        closing journal-period reference."""
        default = dict(default or {})
        default['period_ids'] = []
        default['end_journal_period_id'] = False
        return super(account_fiscalyear, self).copy(cr, uid, id, default=default, context=context)
#----------------------------------------------------------
# Entries
#----------------------------------------------------------
class account_move(osv.osv):
    """Journal entry: a set of account.move.line records that must balance
    (total debit == total credit) before it can be posted."""
    _name = "account.move"
    _description = "Account Entry"
    _order = 'id desc'
def account_move_prepare(self, cr, uid, journal_id, date=False, ref='', company_id=False, context=None):
'''
Prepares and returns a dictionary of values, ready to be passed to create() based on the parameters received.
'''
if not date:
date = fields.date.today()
period_obj = self.pool.get('account.period')
if not company_id:
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = user.company_id.id
if context is None:
context = {}
#put the company in context to find the good period
ctx = context.copy()
ctx.update({'company_id': company_id})
return {
'journal_id': journal_id,
'date': date,
'period_id': period_obj.find(cr, uid, date, context=ctx)[0],
'ref': ref,
'company_id': company_id,
}
def name_get(self, cursor, user, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
if not ids:
return []
res = []
data_move = self.pool.get('account.move').browse(cursor, user, ids, context=context)
for move in data_move:
if move.state=='draft':
name = '*' + str(move.id)
else:
name = move.name
res.append((move.id, name))
return res
def _get_period(self, cr, uid, context=None):
ctx = dict(context or {})
period_ids = self.pool.get('account.period').find(cr, uid, context=ctx)
return period_ids[0]
    def _amount_compute(self, cr, uid, ids, name, args, context, where =''):
        # Function field: the amount of a move is the SUM of the debit side
        # of its lines (a balanced move has equal debit and credit totals).
        # NOTE(review): the `where` parameter is accepted but never used.
        if not ids: return {}
        cr.execute( 'SELECT move_id, SUM(debit) '\
                    'FROM account_move_line '\
                    'WHERE move_id IN %s '\
                    'GROUP BY move_id', (tuple(ids),))
        result = dict(cr.fetchall())
        # moves without lines get an explicit 0.0
        for id in ids:
            result.setdefault(id, 0.0)
        return result
    def _search_amount(self, cr, uid, obj, name, args, context):
        # fnct_search for the 'amount' function field: translate each domain
        # condition on the computed amount into a set of matching move ids
        # via raw SQL, then AND (intersect) the sets together.
        ids = set()
        for cond in args:
            amount = cond[2]
            if isinstance(cond[2],(list,tuple)):
                if cond[1] in ['in','not in']:
                    amount = tuple(cond[2])
                else:
                    # list/tuple operand only makes sense with in/not in
                    continue
            else:
                if cond[1] in ['=like', 'like', 'not like', 'ilike', 'not ilike', 'in', 'not in', 'child_of']:
                    # string/set operators are meaningless on a numeric total
                    continue
            # cond[1] is interpolated into the SQL text; it comes from the
            # ORM domain parser which restricts it to known operators —
            # NOTE(review): confirm no user-controlled operator can reach here.
            cr.execute("select move_id from account_move_line group by move_id having sum(debit) %s %%s" % (cond[1]),(amount,))
            res_ids = set(id[0] for id in cr.fetchall())
            ids = ids and (ids & res_ids) or res_ids
        if ids:
            return [('id', 'in', tuple(ids))]
        return [('id', '=', '0')]
def _get_move_from_lines(self, cr, uid, ids, context=None):
line_obj = self.pool.get('account.move.line')
return [line.move_id.id for line in line_obj.browse(cr, uid, ids, context=context)]
_columns = {
'name': fields.char('Number', size=64, required=True),
'ref': fields.char('Reference', size=64),
'period_id': fields.many2one('account.period', 'Period', required=True, states={'posted':[('readonly',True)]}),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, states={'posted':[('readonly',True)]}),
'state': fields.selection([('draft','Unposted'), ('posted','Posted')], 'Status', required=True, readonly=True,
help='All manually created new journal entries are usually in the status \'Unposted\', but you can set the option to skip that status on the related journal. In that case, they will behave as journal entries automatically created by the system on document validation (invoices, bank statements...) and will be created in \'Posted\' status.'),
'line_id': fields.one2many('account.move.line', 'move_id', 'Entries', states={'posted':[('readonly',True)]}),
'to_check': fields.boolean('To Review', help='Check this box if you are unsure of that journal entry and if you want to note it as \'to be reviewed\' by an accounting expert.'),
'partner_id': fields.related('line_id', 'partner_id', type="many2one", relation="res.partner", string="Partner", store={
_name: (lambda self, cr,uid,ids,c: ids, ['line_id'], 10),
'account.move.line': (_get_move_from_lines, ['partner_id'],10)
}),
'amount': fields.function(_amount_compute, string='Amount', digits_compute=dp.get_precision('Account'), type='float', fnct_search=_search_amount),
'date': fields.date('Date', required=True, states={'posted':[('readonly',True)]}, select=True),
'narration':fields.text('Internal Note'),
'company_id': fields.related('journal_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'balance': fields.float('balance', digits_compute=dp.get_precision('Account'), help="This is a field only used for internal purpose and shouldn't be displayed"),
}
_defaults = {
'name': '/',
'state': 'draft',
'period_id': _get_period,
'date': fields.date.context_today,
'company_id': lambda self, cr, uid, c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
    def _check_centralisation(self, cursor, user, ids, context=None):
        """Constraint helper: a centralized journal may hold at most one move
        per (journal, period) pair."""
        for move in self.browse(cursor, user, ids, context=context):
            if move.journal_id.centralisation:
                move_ids = self.search(cursor, user, [
                    ('period_id', '=', move.period_id.id),
                    ('journal_id', '=', move.journal_id.id),
                    ])
                if len(move_ids) > 1:
                    return False
        return True
    _constraints = [
        (_check_centralisation,
            'You cannot create more than one move per period on a centralized journal.',
            ['journal_id']),
    ]
    def post(self, cr, uid, ids, context=None):
        """Validate the moves and mark the valid ones as posted, assigning a
        definitive number to any move still named '/'.

        The number comes from context['invoice'].internal_number when posting
        from an invoice, otherwise from the journal's sequence.

        :raise osv.except_osv: when no move validates, or a move needs
            numbering but its journal has no sequence.
        """
        if context is None:
            context = {}
        invoice = context.get('invoice', False)
        valid_moves = self.validate(cr, uid, ids, context)
        if not valid_moves:
            raise osv.except_osv(_('Error!'), _('You cannot validate a non-balanced entry.\nMake sure you have configured payment terms properly.\nThe latest payment term line should be of the "Balance" type.'))
        obj_sequence = self.pool.get('ir.sequence')
        for move in self.browse(cr, uid, valid_moves, context=context):
            if move.name =='/':
                # '/' marks a not-yet-numbered move: assign its final name
                new_name = False
                journal = move.journal_id
                if invoice and invoice.internal_number:
                    new_name = invoice.internal_number
                else:
                    if journal.sequence_id:
                        # the sequence may vary per fiscal year
                        c = {'fiscalyear_id': move.period_id.fiscalyear_id.id}
                        new_name = obj_sequence.next_by_id(cr, uid, journal.sequence_id.id, c)
                    else:
                        raise osv.except_osv(_('Error!'), _('Please define a sequence on the journal.'))
                if new_name:
                    self.write(cr, uid, [move.id], {'name':new_name})
        cr.execute('UPDATE account_move '\
                   'SET state=%s '\
                   'WHERE id IN %s',
                   ('posted', tuple(valid_moves),))
        return True
    def button_validate(self, cursor, user, ids, context=None):
        """UI button: check each move then post it.

        All lines of a move must live under the same chart of accounts,
        i.e. share the same topmost ancestor account.
        """
        for move in self.browse(cursor, user, ids, context=context):
            # check that all accounts have the same topmost ancestor
            top_common = None
            for line in move.line_id:
                account = line.account_id
                top_account = account
                # climb the account hierarchy to its root
                while top_account.parent_id:
                    top_account = top_account.parent_id
                if not top_common:
                    top_common = top_account
                elif top_account.id != top_common.id:
                    raise osv.except_osv(_('Error!'),
                                         _('You cannot validate this journal entry because account "%s" does not belong to chart of accounts "%s".') % (account.name, top_common.name))
        return self.post(cursor, user, ids, context=context)
def button_cancel(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if not line.journal_id.update_posted:
raise osv.except_osv(_('Error!'), _('You cannot modify a posted entry of this journal.\nFirst you should set the journal to allow cancelling entries.'))
if ids:
cr.execute('UPDATE account_move '\
'SET state=%s '\
'WHERE id IN %s', ('draft', tuple(ids),))
return True
def write(self, cr, uid, ids, vals, context=None):
if context is None:
context = {}
c = context.copy()
c['novalidate'] = True
result = super(account_move, self).write(cr, uid, ids, vals, c)
self.validate(cr, uid, ids, context=context)
return result
#
# TODO: Check if period is closed !
#
    def create(self, cr, uid, vals, context=None):
        """Create a move, propagating journal/period/date down to its lines.

        When invoked from copy() (context['copy'] set), reconciliation and
        other non-duplicable data are stripped from the line values.  When
        lines are present, validation is deferred until after creation; if
        the journal auto-posts (entry_posted) and the move validates, it is
        posted immediately.

        NOTE(review): the journal/period propagation loops below read
        vals['line_id'] without checking the key exists — callers that pass
        journal_id/period_id without line_id would hit a KeyError; confirm
        all call sites always supply line_id in that case.
        """
        if context is None:
            context = {}
        if 'line_id' in vals and context.get('copy'):
            # strip data that must not survive duplication
            for l in vals['line_id']:
                if not l[0]:
                    l[2].update({
                        'reconcile_id':False,
                        'reconcile_partial_id':False,
                        'analytic_lines':False,
                        'invoice':False,
                        'ref':False,
                        'balance':False,
                        'account_tax_id':False,
                        'statement_id': False,
                    })
        if 'journal_id' in vals and vals.get('journal_id', False):
            # push the journal onto every new line and into the context
            for l in vals['line_id']:
                if not l[0]:
                    l[2]['journal_id'] = vals['journal_id']
            context['journal_id'] = vals['journal_id']
        if 'period_id' in vals:
            for l in vals['line_id']:
                if not l[0]:
                    l[2]['period_id'] = vals['period_id']
            context['period_id'] = vals['period_id']
        else:
            # no period given: fall back to the current default period
            default_period = self._get_period(cr, uid, context)
            for l in vals['line_id']:
                if not l[0]:
                    l[2]['period_id'] = default_period
            context['period_id'] = default_period
        if vals.get('line_id', False):
            # create with validation disabled, then validate the whole move
            c = context.copy()
            c['novalidate'] = True
            c['period_id'] = vals['period_id'] if 'period_id' in vals else self._get_period(cr, uid, context)
            c['journal_id'] = vals['journal_id']
            if 'date' in vals: c['date'] = vals['date']
            result = super(account_move, self).create(cr, uid, vals, c)
            tmp = self.validate(cr, uid, [result], context)
            journal = self.pool.get('account.journal').browse(cr, uid, vals['journal_id'], context)
            if journal.entry_posted and tmp:
                # auto-posting journal: post the balanced move right away
                self.button_validate(cr,uid, [result], context)
        else:
            result = super(account_move, self).create(cr, uid, vals, context)
        return result
def copy(self, cr, uid, id, default=None, context=None):
default = {} if default is None else default.copy()
context = {} if context is None else context.copy()
default.update({
'state':'draft',
'ref': False,
'name':'/',
})
context.update({
'copy':True
})
return super(account_move, self).copy(cr, uid, id, default, context)
    def unlink(self, cr, uid, ids, context=None, check=True):
        """Delete draft moves together with their lines.

        :raise osv.except_osv: for posted moves, or moves whose lines are
            still linked to an invoice.
        """
        if context is None:
            context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        toremove = []
        obj_move_line = self.pool.get('account.move.line')
        for move in self.browse(cr, uid, ids, context=context):
            if move['state'] != 'draft':
                raise osv.except_osv(_('User Error!'),
                        _('You cannot delete a posted journal entry "%s".') % \
                                move['name'])
            for line in move.line_id:
                if line.invoice:
                    raise osv.except_osv(_('User Error!'),
                            _("Move cannot be deleted if linked to an invoice. (Invoice: %s - Move ID:%s)") % \
                                    (line.invoice.number,move.name))
            line_ids = map(lambda x: x.id, move.line_id)
            # journal/period context is required by the line-level checks
            context['journal_id'] = move.journal_id.id
            context['period_id'] = move.period_id.id
            obj_move_line._update_check(cr, uid, line_ids, context)
            obj_move_line.unlink(cr, uid, line_ids, context=context)
            toremove.append(move.id)
        result = super(account_move, self).unlink(cr, uid, toremove, context)
        return result
def _compute_balance(self, cr, uid, id, context=None):
move = self.browse(cr, uid, id, context=context)
amount = 0
for line in move.line_id:
amount+= (line.debit - line.credit)
return amount
def _centralise(self, cr, uid, move, mode, context=None):
assert mode in ('debit', 'credit'), 'Invalid Mode' #to prevent sql injection
currency_obj = self.pool.get('res.currency')
if context is None:
context = {}
if mode=='credit':
account_id = move.journal_id.default_debit_account_id.id
mode2 = 'debit'
if not account_id:
raise osv.except_osv(_('User Error!'),
_('There is no default debit account defined \n' \
'on journal "%s".') % move.journal_id.name)
else:
account_id = move.journal_id.default_credit_account_id.id
mode2 = 'credit'
if not account_id:
raise osv.except_osv(_('User Error!'),
_('There is no default credit account defined \n' \
'on journal "%s".') % move.journal_id.name)
# find the first line of this move with the current mode
# or create it if it doesn't exist
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode))
res = cr.fetchone()
if res:
line_id = res[0]
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = self.pool.get('account.move.line').create(cr, uid, {
'name': _(mode.capitalize()+' Centralisation'),
'centralisation': mode,
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
}, context)
# find the first line of this move with the other mode
# so that we can exclude it from our calculation
cr.execute('select id from account_move_line where move_id=%s and centralisation=%s limit 1', (move.id, mode2))
res = cr.fetchone()
if res:
line_id2 = res[0]
else:
line_id2 = 0
cr.execute('SELECT SUM(%s) FROM account_move_line WHERE move_id=%%s AND id!=%%s' % (mode,), (move.id, line_id2))
result = cr.fetchone()[0] or 0.0
cr.execute('update account_move_line set '+mode2+'=%s where id=%s', (result, line_id))
#adjust also the amount in currency if needed
cr.execute("select currency_id, sum(amount_currency) as amount_currency from account_move_line where move_id = %s and currency_id is not null group by currency_id", (move.id,))
for row in cr.dictfetchall():
currency_id = currency_obj.browse(cr, uid, row['currency_id'], context=context)
if not currency_obj.is_zero(cr, uid, currency_id, row['amount_currency']):
amount_currency = row['amount_currency'] * -1
account_id = amount_currency > 0 and move.journal_id.default_debit_account_id.id or move.journal_id.default_credit_account_id.id
cr.execute('select id from account_move_line where move_id=%s and centralisation=\'currency\' and currency_id = %slimit 1', (move.id, row['currency_id']))
res = cr.fetchone()
if res:
cr.execute('update account_move_line set amount_currency=%s , account_id=%s where id=%s', (amount_currency, account_id, res[0]))
else:
context.update({'journal_id': move.journal_id.id, 'period_id': move.period_id.id})
line_id = self.pool.get('account.move.line').create(cr, uid, {
'name': _('Currency Adjustment'),
'centralisation': 'currency',
'partner_id': False,
'account_id': account_id,
'move_id': move.id,
'journal_id': move.journal_id.id,
'period_id': move.period_id.id,
'date': move.period_id.date_stop,
'debit': 0.0,
'credit': 0.0,
'currency_id': row['currency_id'],
'amount_currency': amount_currency,
}, context)
return True
#
# Validate a balanced move. If it is a centralised journal, create a move.
#
def validate(self, cr, uid, ids, context=None):
if context and ('__last_update' in context):
del context['__last_update']
valid_moves = [] #Maintains a list of moves which can be responsible to create analytic entries
obj_analytic_line = self.pool.get('account.analytic.line')
obj_move_line = self.pool.get('account.move.line')
for move in self.browse(cr, uid, ids, context):
journal = move.journal_id
amount = 0
line_ids = []
line_draft_ids = []
company_id = None
for line in move.line_id:
amount += line.debit - line.credit
line_ids.append(line.id)
if line.state=='draft':
line_draft_ids.append(line.id)
if not company_id:
company_id = line.account_id.company_id.id
if not company_id == line.account_id.company_id.id:
raise osv.except_osv(_('Error!'), _("Cannot create moves for different companies."))
if line.account_id.currency_id and line.currency_id:
if line.account_id.currency_id.id != line.currency_id.id and (line.account_id.currency_id.id != line.account_id.company_id.currency_id.id):
raise osv.except_osv(_('Error!'), _("""Cannot create move with currency different from ..""") % (line.account_id.code, line.account_id.name))
if abs(amount) < 10 ** -4:
# If the move is balanced
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
# Check whether the move lines are confirmed
if not line_draft_ids:
continue
# Update the move lines (set them as valid)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
account = {}
account2 = {}
if journal.type in ('purchase','sale'):
for line in move.line_id:
code = amount = 0
key = (line.account_id.id, line.tax_code_id.id)
if key in account2:
code = account2[key][0]
amount = account2[key][1] * (line.debit + line.credit)
elif line.account_id.id in account:
code = account[line.account_id.id][0]
amount = account[line.account_id.id][1] * (line.debit + line.credit)
if (code or amount) and not (line.tax_code_id or line.tax_amount):
obj_move_line.write(cr, uid, [line.id], {
'tax_code_id': code,
'tax_amount': amount
}, context, check=False)
elif journal.centralisation:
# If the move is not balanced, it must be centralised...
# Add to the list of valid moves
# (analytic lines will be created later for valid moves)
valid_moves.append(move)
#
# Update the move lines (set them as valid)
#
self._centralise(cr, uid, move, 'debit', context=context)
self._centralise(cr, uid, move, 'credit', context=context)
obj_move_line.write(cr, uid, line_draft_ids, {
'state': 'valid'
}, context, check=False)
else:
# We can't validate it (it's unbalanced)
# Setting the lines as draft
not_draft_line_ids = list(set(line_ids) - set(line_draft_ids))
if not_draft_line_ids:
obj_move_line.write(cr, uid, not_draft_line_ids, {
'state': 'draft'
}, context, check=False)
# Create analytic lines for the valid moves
for record in valid_moves:
obj_move_line.create_analytic_lines(cr, uid, [line.id for line in record.line_id], context)
valid_moves = [move.id for move in valid_moves]
return len(valid_moves) > 0 and valid_moves or False
class account_move_reconcile(osv.osv):
    """Reconciliation record linking journal items that balance each other.

    A reconciliation is either full (lines attached via ``line_id``) or
    partial (lines attached via ``line_partial_ids``);
    ``reconcile_partial_check`` promotes a partial reconciliation to a full
    one once the linked lines sum to zero.
    """
    _name = "account.move.reconcile"
    _description = "Account Reconciliation"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'type': fields.char('Type', size=16, required=True),
        # Fully reconciled journal items
        'line_id': fields.one2many('account.move.line', 'reconcile_id', 'Entry Lines'),
        # Partially reconciled journal items (not yet balancing to zero)
        'line_partial_ids': fields.one2many('account.move.line', 'reconcile_partial_id', 'Partial Entry lines'),
        'create_date': fields.date('Creation date', readonly=True),
        'opening_reconciliation': fields.boolean('Opening Entries Reconciliation', help="Is this reconciliation produced by the opening of a new fiscal year ?."),
    }
    _defaults = {
        # Name taken from the 'account.reconcile' sequence, '/' as a fallback
        'name': lambda self,cr,uid,ctx=None: self.pool.get('ir.sequence').get(cr, uid, 'account.reconcile', context=ctx) or '/',
    }

    # You cannot unlink a reconciliation if it is a opening_reconciliation one,
    # you should use the generate opening entries wizard for that
    def unlink(self, cr, uid, ids, context=None):
        """Forbid deletion of reconciliations created by the fiscal-year opening process."""
        for move_rec in self.browse(cr, uid, ids, context=context):
            if move_rec.opening_reconciliation:
                raise osv.except_osv(_('Error!'), _('You cannot unreconcile journal items if they has been generated by the \
                        opening/closing fiscal year process.'))
        return super(account_move_reconcile, self).unlink(cr, uid, ids, context=context)

    # Look in the line_id and line_partial_ids to ensure the partner is the same or empty
    # on all lines. We allow that only for opening/closing period
    def _check_same_partner(self, cr, uid, ids, context=None):
        """Constraint: every receivable/payable line of a (non-opening)
        reconciliation must share the first line's partner."""
        for reconcile in self.browse(cr, uid, ids, context=context):
            move_lines = []
            if not reconcile.opening_reconciliation:
                if reconcile.line_id:
                    first_partner = reconcile.line_id[0].partner_id.id
                    move_lines = reconcile.line_id
                elif reconcile.line_partial_ids:
                    first_partner = reconcile.line_partial_ids[0].partner_id.id
                    move_lines = reconcile.line_partial_ids
                # Only receivable/payable accounts are required to match;
                # other account types may carry any partner.
                if any([(line.account_id.type in ('receivable', 'payable') and line.partner_id.id != first_partner) for line in move_lines]):
                    return False
        return True

    _constraints = [
        (_check_same_partner, 'You can only reconcile journal items with the same partner.', ['line_id']),
    ]

    def reconcile_partial_check(self, cr, uid, ids, type='auto', context=None):
        """Promote a partial reconciliation to a full one when its lines balance.

        Sums the partially reconciled lines (in the account currency when the
        account has one, otherwise debit - credit) and, if the total is zero,
        links the lines to this reconciliation via ``reconcile_id``.

        NOTE(review): ``rec`` below refers to the *last* record of the loop
        above, and ``total`` accumulates across all ids — this is only
        coherent when called with a single id; confirm callers do so.
        """
        total = 0.0
        for rec in self.browse(cr, uid, ids, context=context):
            for line in rec.line_partial_ids:
                if line.account_id.currency_id:
                    total += line.amount_currency
                else:
                    total += (line.debit or 0.0) - (line.credit or 0.0)
        if not total:
            self.pool.get('account.move.line').write(cr, uid,
                map(lambda x: x.id, rec.line_partial_ids),
                {'reconcile_id': rec.id }
            )
        return True

    def name_get(self, cr, uid, ids, context=None):
        """Display name; partial reconciliations get the residual appended as '(x.xx)'."""
        if not ids:
            return []
        result = []
        for r in self.browse(cr, uid, ids, context=context):
            # Residual of the partially reconciled lines (debit - credit)
            total = reduce(lambda y,t: (t.debit or 0.0) - (t.credit or 0.0) + y, r.line_partial_ids, 0.0)
            if total:
                name = '%s (%.2f)' % (r.name, total)
                result.append((r.id,name))
            else:
                result.append((r.id,r.name))
        return result
#----------------------------------------------------------
# Tax
#----------------------------------------------------------
"""
a documenter
child_depend: la taxe depend des taxes filles
"""
class account_tax_code(osv.osv):
    """
    A code for the tax object.

    This code is used for some tax declarations: each code accumulates the
    ``tax_amount`` of the journal items pointing at it, plus the totals of
    its child codes weighted by their ``sign`` coefficient.
    """

    def _sum(self, cr, uid, ids, name, args, context, where ='', where_params=()):
        """Sum the ``tax_amount`` of move lines per tax code (children included).

        When the declaration is based on payments (``context['based_on'] ==
        'payments'``), only lines whose invoice is paid — or that have no
        invoice at all — are counted.  Child totals are folded into their
        parent using each child's ``sign``, and the result is rounded at the
        'Account' decimal precision.
        """
        parent_ids = tuple(self.search(cr, uid, [('parent_id', 'child_of', ids)]))
        if context.get('based_on', 'invoices') == 'payments':
            cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
                    FROM account_move_line AS line, \
                        account_move AS move \
                        LEFT JOIN account_invoice invoice ON \
                            (invoice.move_id = move.id) \
                    WHERE line.tax_code_id IN %s '+where+' \
                        AND move.id = line.move_id \
                        AND ((invoice.state = \'paid\') \
                            OR (invoice.id IS NULL)) \
                    GROUP BY line.tax_code_id',
                        (parent_ids,) + where_params)
        else:
            cr.execute('SELECT line.tax_code_id, sum(line.tax_amount) \
                    FROM account_move_line AS line, \
                    account_move AS move \
                    WHERE line.tax_code_id IN %s '+where+' \
                    AND move.id = line.move_id \
                    GROUP BY line.tax_code_id',
                       (parent_ids,) + where_params)
        res=dict(cr.fetchall())
        obj_precision = self.pool.get('decimal.precision')
        res2 = {}
        for record in self.browse(cr, uid, ids, context=context):
            def _rec_get(record):
                # Recursively add each child's total weighted by its sign
                amount = res.get(record.id, 0.0)
                for rec in record.child_ids:
                    amount += _rec_get(rec) * rec.sign
                return amount
            res2[record.id] = round(_rec_get(record), obj_precision.precision_get(cr, uid, 'Account'))
        return res2

    def _sum_year(self, cr, uid, ids, name, args, context=None):
        """Functional getter for 'sum': totals over the fiscal year in context
        (or the current ones), restricted to posted moves unless
        ``context['state'] == 'all'``."""
        if context is None:
            context = {}
        move_state = ('posted', )
        if context.get('state', 'all') == 'all':
            move_state = ('draft', 'posted', )
        if context.get('fiscalyear_id', False):
            fiscalyear_id = [context['fiscalyear_id']]
        else:
            fiscalyear_id = self.pool.get('account.fiscalyear').finds(cr, uid, exception=False)
        where = ''
        where_params = ()
        if fiscalyear_id:
            pids = []
            for fy in fiscalyear_id:
                pids += map(lambda x: str(x.id), self.pool.get('account.fiscalyear').browse(cr, uid, fy).period_ids)
            if pids:
                where = ' AND line.period_id IN %s AND move.state IN %s '
                where_params = (tuple(pids), move_state)
        return self._sum(cr, uid, ids, name, args, context,
                where=where, where_params=where_params)

    def _sum_period(self, cr, uid, ids, name, args, context):
        """Functional getter for 'sum_period': totals over the period in
        context (or the current one)."""
        if context is None:
            context = {}
        move_state = ('posted', )
        if context.get('state', False) == 'all':
            move_state = ('draft', 'posted', )
        if context.get('period_id', False):
            period_id = context['period_id']
        else:
            period_id = self.pool.get('account.period').find(cr, uid, context=context)
            if not period_id:
                # No open period: report zero for every requested code
                return dict.fromkeys(ids, 0.0)
            period_id = period_id[0]
        return self._sum(cr, uid, ids, name, args, context,
                where=' AND line.period_id=%s AND move.state IN %s', where_params=(period_id, move_state))

    _name = 'account.tax.code'
    _description = 'Tax Code'
    _rec_name = 'code'
    _columns = {
        'name': fields.char('Tax Case Name', size=64, required=True, translate=True),
        'code': fields.char('Case Code', size=64),
        'info': fields.text('Description'),
        'sum': fields.function(_sum_year, string="Year Sum"),
        'sum_period': fields.function(_sum_period, string="Period Sum"),
        'parent_id': fields.many2one('account.tax.code', 'Parent Code', select=True),
        'child_ids': fields.one2many('account.tax.code', 'parent_id', 'Child Codes'),
        'line_ids': fields.one2many('account.move.line', 'tax_code_id', 'Lines'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
        'sign': fields.float('Coefficent for parent', required=True, help='You can specify here the coefficient that will be used when consolidating the amount of this case into its parent. For example, set 1/-1 if you want to add/substract it.'),
        'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax code to appear on invoices"),
        'sequence': fields.integer('Sequence', help="Determine the display order in the report 'Accounting \ Reporting \ Generic Reporting \ Taxes \ Taxes Report'"),
    }

    def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
        """Search on both 'code' and 'name' (AND-combined for negative operators)."""
        if not args:
            args = []
        if operator in expression.NEGATIVE_TERM_OPERATORS:
            domain = [('code', operator, name), ('name', operator, name)]
        else:
            domain = ['|', ('code', operator, name), ('name', operator, name)]
        ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
        return self.name_get(cr, user, ids, context=context)

    def name_get(self, cr, uid, ids, context=None):
        """Display as '<code> - <name>', or just the name when there is no code."""
        # Accept a single id as well as a list of ids
        # (the original duplicated this normalization after the empty-ids
        # check; the dead second copy has been removed)
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['name','code'], context, load='_classic_write')
        return [(x['id'], (x['code'] and (x['code'] + ' - ') or '') + x['name']) \
                for x in reads]

    def _default_company(self, cr, uid, context=None):
        """Default company: the user's company, else the first root company."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        if user.company_id:
            return user.company_id.id
        return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]

    _defaults = {
        'company_id': _default_company,
        'sign': 1.0,
        'notprintable': False,
    }

    def copy(self, cr, uid, id, default=None, context=None):
        """Duplicate a tax code without carrying its journal items over."""
        if default is None:
            default = {}
        default = default.copy()
        default.update({'line_ids': []})
        return super(account_tax_code, self).copy(cr, uid, id, default, context)

    _check_recursion = check_cycle
    _constraints = [
        (_check_recursion, 'Error!\nYou cannot create recursive accounts.', ['parent_id'])
    ]
    _order = 'code'
def get_precision_tax():
    """Build a ``digits_compute`` callable for tax amount fields.

    The returned function looks up the 'Account' decimal precision for the
    cursor's database and grants three extra digits, so intermediate tax
    amounts keep more accuracy than plain monetary values.
    """
    def change_digit_tax(cr):
        account_precision = openerp.registry(cr.dbname)['decimal.precision'].precision_get(cr, SUPERUSER_ID, 'Account')
        return (16, account_precision + 3)
    return change_digit_tax
class account_tax(osv.osv):
"""
A tax object.
Type: percent, fixed, none, code
PERCENT: tax = price * amount
FIXED: tax = price + amount
NONE: no tax line
CODE: execute python code. localcontext = {'price_unit':pu}
return result in the context
Ex: result=round(price_unit*0.21,4)
"""
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
name = self.read(cr, uid, id, ['name'], context=context)['name']
default = default.copy()
default.update({'name': name + _(' (Copy)')})
return super(account_tax, self).copy_data(cr, uid, id, default=default, context=context)
_name = 'account.tax'
_description = 'Tax'
_columns = {
'name': fields.char('Tax Name', size=64, required=True, translate=True, help="This name will be displayed on reports"),
'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the tax lines from the lowest sequences to the higher ones. The order is important if you have a tax with several tax children. In this case, the evaluation order is important."),
'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For taxes of type percentage, enter % ratio between 0-1."),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the tax without removing it."),
'type': fields.selection( [('percent','Percentage'), ('fixed','Fixed Amount'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True,
help="The computation method for the tax amount."),
'applicable_type': fields.selection( [('true','Always'), ('code','Given by Python Code')], 'Applicability', required=True,
help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
'domain':fields.char('Domain', size=32, help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
'account_collected_id':fields.many2one('account.account', 'Invoice Tax Account', help="Set the account that will be set by default on invoice tax lines for invoices. Leave empty to use the expense account."),
'account_paid_id':fields.many2one('account.account', 'Refund Tax Account', help="Set the account that will be set by default on invoice tax lines for refunds. Leave empty to use the expense account."),
'account_analytic_collected_id':fields.many2one('account.analytic.account', 'Invoice Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for invoices. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'account_analytic_paid_id':fields.many2one('account.analytic.account', 'Refund Tax Analytic Account', help="Set the analytic account that will be used by default on the invoice tax lines for refunds. Leave empty if you don't want to use an analytic account on the invoice tax lines by default."),
'parent_id':fields.many2one('account.tax', 'Parent Tax Account', select=True),
'child_ids':fields.one2many('account.tax', 'parent_id', 'Child Tax Accounts'),
'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
'python_compute':fields.text('Python Code'),
'python_compute_inv':fields.text('Python Code (reverse)'),
'python_applicable':fields.text('Applicable Code'),
#
# Fields used for the Tax declaration
#
'base_code_id': fields.many2one('account.tax.code', 'Account Base Code', help="Use this code for the tax declaration."),
'tax_code_id': fields.many2one('account.tax.code', 'Account Tax Code', help="Use this code for the tax declaration."),
'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
# Same fields for refund invoices
'ref_base_code_id': fields.many2one('account.tax.code', 'Refund Base Code', help="Use this code for the tax declaration."),
'ref_tax_code_id': fields.many2one('account.tax.code', 'Refund Tax Code', help="Use this code for the tax declaration."),
'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1.", digits_compute=get_precision_tax()),
'include_base_amount': fields.boolean('Included in base amount', help="Indicates if the amount of tax must be included in the base amount for the computation of the next taxes"),
'company_id': fields.many2one('res.company', 'Company', required=True),
'description': fields.char('Tax Code'),
'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Application', required=True)
}
_sql_constraints = [
('name_company_uniq', 'unique(name, company_id)', 'Tax Name must be unique per company!'),
]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=80):
"""
Returns a list of tupples containing id, name, as internally it is called {def name_get}
result format: {[(id, name), (id, name), ...]}
@param cr: A database cursor
@param user: ID of the user currently logged in
@param name: name to search
@param args: other arguments
@param operator: default operator is 'ilike', it can be changed
@param context: context arguments, like lang, time zone
@param limit: Returns first 'n' ids of complete result, default is 80.
@return: Returns a list of tupples containing id and name
"""
if not args:
args = []
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = [('description', operator, name), ('name', operator, name)]
else:
domain = ['|', ('description', operator, name), ('name', operator, name)]
ids = self.search(cr, user, expression.AND([domain, args]), limit=limit, context=context)
return self.name_get(cr, user, ids, context=context)
def write(self, cr, uid, ids, vals, context=None):
if vals.get('type', False) and vals['type'] in ('none', 'code'):
vals.update({'amount': 0.0})
return super(account_tax, self).write(cr, uid, ids, vals, context=context)
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
journal_pool = self.pool.get('account.journal')
if context.get('type'):
if context.get('type') in ('out_invoice','out_refund'):
args += [('type_tax_use','in',['sale','all'])]
elif context.get('type') in ('in_invoice','in_refund'):
args += [('type_tax_use','in',['purchase','all'])]
if context.get('journal_id'):
journal = journal_pool.browse(cr, uid, context.get('journal_id'))
if journal.type in ('sale', 'purchase'):
args += [('type_tax_use','in',[journal.type,'all'])]
return super(account_tax, self).search(cr, uid, args, offset, limit, order, context, count)
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
res = []
for record in self.read(cr, uid, ids, ['description','name'], context=context):
name = record['description'] and record['description'] or record['name']
res.append((record['id'],name ))
return res
def _default_company(self, cr, uid, context=None):
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
if user.company_id:
return user.company_id.id
return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
_defaults = {
'python_compute': '''# price_unit\n# or False\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
'python_compute_inv': '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
'applicable_type': 'true',
'type': 'percent',
'amount': 0,
'price_include': 0,
'active': 1,
'type_tax_use': 'all',
'sequence': 1,
'ref_tax_sign': 1,
'ref_base_sign': 1,
'tax_sign': 1,
'base_sign': 1,
'include_base_amount': False,
'company_id': _default_company,
}
_order = 'sequence'
def _applicable(self, cr, uid, taxes, price_unit, product=None, partner=None):
res = []
for tax in taxes:
if tax.applicable_type=='code':
localdict = {'price_unit':price_unit, 'product':product, 'partner':partner}
exec tax.python_applicable in localdict
if localdict.get('result', False):
res.append(tax)
else:
res.append(tax)
return res
def _unit_compute(self, cr, uid, taxes, price_unit, product=None, partner=None, quantity=0):
taxes = self._applicable(cr, uid, taxes, price_unit ,product, partner)
res = []
cur_price_unit=price_unit
for tax in taxes:
# we compute the amount for the current tax object and append it to the result
data = {'id':tax.id,
'name':tax.description and tax.description + " - " + tax.name or tax.name,
'account_collected_id':tax.account_collected_id.id,
'account_paid_id':tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
}
res.append(data)
if tax.type=='percent':
amount = cur_price_unit * tax.amount
data['amount'] = amount
elif tax.type=='fixed':
data['amount'] = tax.amount
data['tax_amount']=quantity
# data['amount'] = quantity
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute in localdict
amount = localdict['result']
data['amount'] = amount
elif tax.type=='balance':
data['amount'] = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
data['balance'] = cur_price_unit
amount2 = data.get('amount', 0.0)
if tax.child_ids:
if tax.child_depend:
latest = res.pop()
amount = amount2
child_tax = self._unit_compute(cr, uid, tax.child_ids, amount, product, partner, quantity)
res.extend(child_tax)
for child in child_tax:
amount2 += child.get('amount', 0.0)
if tax.child_depend:
for r in res:
for name in ('base','ref_base'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['price_unit'] = latest['price_unit']
latest[name+'_code_id'] = False
for name in ('tax','ref_tax'):
if latest[name+'_code_id'] and latest[name+'_sign'] and not r[name+'_code_id']:
r[name+'_code_id'] = latest[name+'_code_id']
r[name+'_sign'] = latest[name+'_sign']
r['amount'] = data['amount']
latest[name+'_code_id'] = False
if tax.include_base_amount:
cur_price_unit+=amount2
return res
def compute_for_bank_reconciliation(self, cr, uid, tax_id, amount, context=None):
""" Called by RPC by the bank statement reconciliation widget """
tax = self.browse(cr, uid, tax_id, context=context)
return self.compute_all(cr, uid, [tax], amount, 1) # TOCHECK may use force_exclude parameter
def compute_all(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, force_excluded=False):
"""
:param force_excluded: boolean used to say that we don't want to consider the value of field price_include of
tax. It's used in encoding by line where you don't matter if you encoded a tax with that boolean to True or
False
RETURN: {
'total': 0.0, # Total without taxes
'total_included: 0.0, # Total with taxes
'taxes': [] # List of taxes, see compute for the format
}
"""
# By default, for each tax, tax amount will first be computed
# and rounded at the 'Account' decimal precision for each
# PO/SO/invoice line and then these rounded amounts will be
# summed, leading to the total amount for that tax. But, if the
# company has tax_calculation_rounding_method = round_globally,
# we still follow the same method, but we use a much larger
# precision when we round the tax amount for each line (we use
# the 'Account' decimal precision + 5), and that way it's like
# rounding after the sum of the tax amounts of each line
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
tax_compute_precision = precision
if taxes and taxes[0].company_id.tax_calculation_rounding_method == 'round_globally':
tax_compute_precision += 5
totalin = totalex = round(price_unit * quantity, precision)
tin = []
tex = []
for tax in taxes:
if not tax.price_include or force_excluded:
tex.append(tax)
else:
tin.append(tax)
tin = self.compute_inv(cr, uid, tin, price_unit, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tin:
totalex -= r.get('amount', 0.0)
totlex_qty = 0.0
try:
totlex_qty = totalex/quantity
except:
pass
tex = self._compute(cr, uid, tex, totlex_qty, quantity, product=product, partner=partner, precision=tax_compute_precision)
for r in tex:
totalin += r.get('amount', 0.0)
return {
'total': totalex,
'total_included': totalin,
'taxes': tin + tex
}
def compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None):
_logger.warning("Deprecated, use compute_all(...)['taxes'] instead of compute(...) to manage prices with tax included.")
return self._compute(cr, uid, taxes, price_unit, quantity, product, partner)
def _compute(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute(cr, uid, taxes, price_unit, product, partner, quantity)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r.get('balance', 0.0) * quantity, precision) - total
else:
r['amount'] = round(r.get('amount', 0.0) * quantity, precision)
total += r['amount']
return res
def _unit_compute_inv(self, cr, uid, taxes, price_unit, product=None, partner=None):
taxes = self._applicable(cr, uid, taxes, price_unit, product, partner)
res = []
taxes.reverse()
cur_price_unit = price_unit
tax_parent_tot = 0.0
for tax in taxes:
if (tax.type=='percent') and not tax.include_base_amount:
tax_parent_tot += tax.amount
for tax in taxes:
if (tax.type=='fixed') and not tax.include_base_amount:
cur_price_unit -= tax.amount
for tax in taxes:
if tax.type=='percent':
if tax.include_base_amount:
amount = cur_price_unit - (cur_price_unit / (1 + tax.amount))
else:
amount = (cur_price_unit / (1 + tax_parent_tot)) * tax.amount
elif tax.type=='fixed':
amount = tax.amount
elif tax.type=='code':
localdict = {'price_unit':cur_price_unit, 'product':product, 'partner':partner}
exec tax.python_compute_inv in localdict
amount = localdict['result']
elif tax.type=='balance':
amount = cur_price_unit - reduce(lambda x,y: y.get('amount',0.0)+x, res, 0.0)
if tax.include_base_amount:
cur_price_unit -= amount
todo = 0
else:
todo = 1
res.append({
'id': tax.id,
'todo': todo,
'name': tax.name,
'amount': amount,
'account_collected_id': tax.account_collected_id.id,
'account_paid_id': tax.account_paid_id.id,
'account_analytic_collected_id': tax.account_analytic_collected_id.id,
'account_analytic_paid_id': tax.account_analytic_paid_id.id,
'base_code_id': tax.base_code_id.id,
'ref_base_code_id': tax.ref_base_code_id.id,
'sequence': tax.sequence,
'base_sign': tax.base_sign,
'tax_sign': tax.tax_sign,
'ref_base_sign': tax.ref_base_sign,
'ref_tax_sign': tax.ref_tax_sign,
'price_unit': cur_price_unit,
'tax_code_id': tax.tax_code_id.id,
'ref_tax_code_id': tax.ref_tax_code_id.id,
})
if tax.child_ids:
if tax.child_depend:
del res[-1]
amount = price_unit
parent_tax = self._unit_compute_inv(cr, uid, tax.child_ids, amount, product, partner)
res.extend(parent_tax)
total = 0.0
for r in res:
if r['todo']:
total += r['amount']
for r in res:
r['price_unit'] -= total
r['todo'] = 0
return res
def compute_inv(self, cr, uid, taxes, price_unit, quantity, product=None, partner=None, precision=None):
"""
Compute tax values for given PRICE_UNIT, QUANTITY and a buyer/seller ADDRESS_ID.
Price Unit is a Tax included price
RETURN:
[ tax ]
tax = {'name':'', 'amount':0.0, 'account_collected_id':1, 'account_paid_id':2}
one tax for each tax id in IDS and their children
"""
if not precision:
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Account')
res = self._unit_compute_inv(cr, uid, taxes, price_unit, product, partner=None)
total = 0.0
for r in res:
if r.get('balance',False):
r['amount'] = round(r['balance'] * quantity, precision) - total
else:
r['amount'] = round(r['amount'] * quantity, precision)
total += r['amount']
return res
# ---------------------------------------------------------
# Account Entries Models
# ---------------------------------------------------------
class account_model(osv.osv):
    """Template for recurring accounting entries.

    ``generate`` materializes the model as one account.move per model record,
    with one move line per model line.  The move name may embed
    ``%(year)s`` / ``%(month)s`` / ``%(date)s`` labels.
    """
    _name = "account.model"
    _description = "Account Model"
    _columns = {
        'name': fields.char('Model Name', size=64, required=True, help="This is a model for recurring accounting entries"),
        'journal_id': fields.many2one('account.journal', 'Journal', required=True),
        'company_id': fields.related('journal_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
        'lines_id': fields.one2many('account.model.line', 'model_id', 'Model Entries'),
        'legend': fields.text('Legend', readonly=True, size=100),
    }

    _defaults = {
        'legend': lambda self, cr, uid, context:_('You can specify year, month and date in the name of the model using the following labels:\n\n%(year)s: To Specify Year \n%(month)s: To Specify Month \n%(date)s: Current Date\n\ne.g. My model on %(date)s'),
    }

    def generate(self, cr, uid, ids, data=None, context=None):
        """Create account moves (and their lines) from the given models.

        :param data: optional dict; ``data['date']`` (YYYY-MM-DD) overrides
            the move date, which defaults to today.
        :return: list of created account.move ids
        :raises osv.except_osv: when the model name contains a bad label,
            when an analytic line lacks an analytic journal, or when a
            partner-based maturity date has no partner.
        """
        if data is None:
            data = {}
        move_ids = []
        entry = {}
        account_move_obj = self.pool.get('account.move')
        account_move_line_obj = self.pool.get('account.move.line')
        pt_obj = self.pool.get('account.payment.term')
        period_obj = self.pool.get('account.period')

        if context is None:
            context = {}

        if data.get('date', False):
            context.update({'date': data['date']})

        move_date = context.get('date', time.strftime('%Y-%m-%d'))
        move_date = datetime.strptime(move_date,"%Y-%m-%d")
        for model in self.browse(cr, uid, ids, context=context):
            ctx = context.copy()
            ctx.update({'company_id': model.company_id.id})
            period_ids = period_obj.find(cr, uid, dt=context.get('date', False), context=ctx)
            period_id = period_ids and period_ids[0] or False
            ctx.update({'journal_id': model.journal_id.id,'period_id': period_id})
            try:
                # %-formatting with the supported substitution labels;
                # a bad label raises KeyError, a bad format spec ValueError
                entry['name'] = model.name%{'year': move_date.strftime('%Y'), 'month': move_date.strftime('%m'), 'date': move_date.strftime('%Y-%m')}
            except (KeyError, ValueError):
                raise osv.except_osv(_('Wrong Model!'), _('You have a wrong expression "%(...)s" in your model!'))
            move_id = account_move_obj.create(cr, uid, {
                'ref': entry['name'],
                'period_id': period_id,
                'journal_id': model.journal_id.id,
                'date': context.get('date', fields.date.context_today(self,cr,uid,context=context))
            })
            move_ids.append(move_id)
            for line in model.lines_id:
                analytic_account_id = False
                if line.analytic_account_id:
                    # Analytic lines need an analytic journal on the move journal
                    if not model.journal_id.analytic_journal_id:
                        raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (model.journal_id.name,))
                    analytic_account_id = line.analytic_account_id.id
                val = {
                    'move_id': move_id,
                    'journal_id': model.journal_id.id,
                    'period_id': period_id,
                    'analytic_account_id': analytic_account_id
                }

                date_maturity = context.get('date',time.strftime('%Y-%m-%d'))
                if line.date_maturity == 'partner':
                    if not line.partner_id:
                        raise osv.except_osv(_('Error!'), _("Maturity date of entry line generated by model line '%s' of model '%s' is based on partner payment term!" \
                                                                "\nPlease define partner on it!")%(line.name, model.name))

                    payment_term_id = False
                    if model.journal_id.type in ('purchase', 'purchase_refund') and line.partner_id.property_supplier_payment_term:
                        payment_term_id = line.partner_id.property_supplier_payment_term.id
                    elif line.partner_id.property_payment_term:
                        payment_term_id = line.partner_id.property_payment_term.id
                    if payment_term_id:
                        # Use the latest due date of the payment term
                        pterm_list = pt_obj.compute(cr, uid, payment_term_id, value=1, date_ref=date_maturity)
                        if pterm_list:
                            pterm_list = [l[0] for l in pterm_list]
                            pterm_list.sort()
                            date_maturity = pterm_list[-1]

                val.update({
                    'name': line.name,
                    'quantity': line.quantity,
                    'debit': line.debit,
                    'credit': line.credit,
                    'account_id': line.account_id.id,
                    'move_id': move_id,
                    'partner_id': line.partner_id.id,
                    'date': context.get('date', fields.date.context_today(self,cr,uid,context=context)),
                    'date_maturity': date_maturity
                })
                account_move_line_obj.create(cr, uid, val, context=ctx)

        return move_ids

    def onchange_journal_id(self, cr, uid, ids, journal_id, context=None):
        """Propagate the journal's company onto the model form."""
        company_id = False

        if journal_id:
            journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
            if journal.company_id.id:
                company_id = journal.company_id.id

        return {'value': {'company_id': company_id}}
class account_model_line(osv.osv):
    """One line of a recurring-entry model (see account_model.generate)."""
    _name = "account.model.line"
    _description = "Account Model Entries"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the resources from lower sequences to higher ones."),
        'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Account'), help="The optional quantity on entries."),
        'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
        'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
        'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade"),
        'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', ondelete="cascade"),
        'model_id': fields.many2one('account.model', 'Model', required=True, ondelete="cascade", select=True),
        'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency."),
        'currency_id': fields.many2one('res.currency', 'Currency'),
        'partner_id': fields.many2one('res.partner', 'Partner'),
        'date_maturity': fields.selection([('today','Date of the day'), ('partner','Partner Payment Term')], 'Maturity Date', help="The maturity date of the generated entries for this model. You can choose between the creation date or the creation date of the entries plus the partner payment terms."),
    }
    _order = 'sequence'
    # A line is either a debit or a credit, never both, and never negative
    _sql_constraints = [
        ('credit_debit1', 'CHECK (credit*debit=0)',  'Wrong credit or debit value in model, they must be positive!'),
        ('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in model, they must be positive!'),
    ]
# ---------------------------------------------------------
# Account Subscription
# ---------------------------------------------------------
class account_subscription(osv.osv):
    """Recurring-entry subscription.

    Schedules ``period_total`` generations of an account.model, one every
    ``period_nbr`` day(s)/month(s)/year(s) starting at ``date_start``.
    ``compute`` builds the schedule lines; ``check`` closes the subscription
    once every line has produced its accounting move.
    """
    _name = "account.subscription"
    _description = "Account Subscription"
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'ref': fields.char('Reference', size=16),
        'model_id': fields.many2one('account.model', 'Model', required=True),
        'date_start': fields.date('Start Date', required=True),
        'period_total': fields.integer('Number of Periods', required=True),
        'period_nbr': fields.integer('Period', required=True),
        'period_type': fields.selection([('day','days'),('month','month'),('year','year')], 'Period Type', required=True),
        'state': fields.selection([('draft','Draft'),('running','Running'),('done','Done')], 'Status', required=True, readonly=True),
        'lines_id': fields.one2many('account.subscription.line', 'subscription_id', 'Subscription Lines')
    }
    _defaults = {
        'date_start': fields.date.context_today,
        'period_type': 'month',
        'period_total': 12,
        'period_nbr': 1,
        'state': 'draft',
    }
    def state_draft(self, cr, uid, ids, context=None):
        """Reset the subscriptions to the draft state."""
        self.write(cr, uid, ids, {'state':'draft'})
        return False
    def check(self, cr, uid, ids, context=None):
        """Mark as done every subscription whose lines all carry a move."""
        todone = []
        for sub in self.browse(cr, uid, ids, context=context):
            if all(line.move_id.id for line in sub.lines_id):
                todone.append(sub.id)
        if todone:
            self.write(cr, uid, todone, {'state':'done'})
        return False
    def remove_line(self, cr, uid, ids, context=None):
        """Drop the not-yet-generated lines and put the subscriptions back to draft."""
        line_obj = self.pool.get('account.subscription.line')
        toremove = []
        for sub in self.browse(cr, uid, ids, context=context):
            toremove.extend(line.id for line in sub.lines_id if not line.move_id.id)
        if toremove:
            line_obj.unlink(cr, uid, toremove)
        self.write(cr, uid, ids, {'state':'draft'})
        return False
    def compute(self, cr, uid, ids, context=None):
        """Create one subscription line per period and start the subscription.

        Cleanup vs the previous version: the line model is looked up once
        instead of on every iteration, the start date is parsed a single
        time per subscription instead of round-tripping through its string
        form at each step, and the three period-type branches are mutually
        exclusive (period_type is a required 3-value selection).
        """
        line_obj = self.pool.get('account.subscription.line')
        # period_type -> relativedelta keyword, per the selection above.
        delta_field = {'day': 'days', 'month': 'months', 'year': 'years'}
        for sub in self.browse(cr, uid, ids, context=context):
            ds = datetime.strptime(sub.date_start, '%Y-%m-%d')
            step = relativedelta(**{delta_field[sub.period_type]: sub.period_nbr})
            for i in range(sub.period_total):
                line_obj.create(cr, uid, {
                    'date': ds.strftime('%Y-%m-%d'),
                    'subscription_id': sub.id,
                })
                ds += step
        self.write(cr, uid, ids, {'state':'running'})
        return True
class account_subscription_line(osv.osv):
    """One scheduled occurrence of a subscription: a date plus, once
    generated, the accounting move produced from the subscription model."""
    _name = "account.subscription.line"
    _description = "Account Subscription Line"
    _rec_name = 'date'
    _columns = {
        'subscription_id': fields.many2one('account.subscription', 'Subscription', required=True, select=True),
        'date': fields.date('Date', required=True),
        'move_id': fields.many2one('account.move', 'Entry'),
    }
    def move_create(self, cr, uid, ids, context=None):
        """Generate the moves for the given lines, link each line to its
        first generated move, then let the parent subscriptions check
        whether they are complete. Returns every created move id."""
        model_obj = self.pool.get('account.model')
        subscriptions_seen = {}
        created_move_ids = []
        for line in self.browse(cr, uid, ids, context=context):
            generated = model_obj.generate(cr, uid, [line.subscription_id.model_id.id], {'date': line.date}, context)
            subscriptions_seen[line.subscription_id.id] = True
            self.write(cr, uid, [line.id], {'move_id': generated[0]})
            created_move_ids.extend(generated)
        if subscriptions_seen:
            self.pool.get('account.subscription').check(cr, uid, subscriptions_seen.keys(), context)
        return created_move_ids
# ---------------------------------------------------------------
# Account Templates: Account, Tax, Tax Code and chart. + Wizard
# ---------------------------------------------------------------
# Forward declaration: registers 'account.tax.template' so the many2many
# on account.account.template below can reference it; the full model
# (columns, defaults, helpers) is defined further down in this file.
class account_tax_template(osv.osv):
    _name = 'account.tax.template'
class account_account_template(osv.osv):
    # Chart-of-accounts template node: real account.account records are
    # created per company from these templates by generate_account().
    _order = "code"
    _name = "account.account.template"
    _description ='Templates for Accounts'
    _columns = {
        'name': fields.char('Name', size=256, required=True, select=True),
        'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
        'code': fields.char('Code', size=64, required=True, select=1),
        'type': fields.selection([
            ('receivable','Receivable'),
            ('payable','Payable'),
            ('view','View'),
            ('consolidation','Consolidation'),
            ('liquidity','Liquidity'),
            ('other','Regular'),
            ('closed','Closed'),
            ], 'Internal Type', required=True,help="This type is used to differentiate types with "\
            "special effects in OpenERP: view can not have entries, consolidation are accounts that "\
            "can have children accounts for multi-company consolidations, payable/receivable are for "\
            "partners accounts (for debit/credit computations), closed for depreciated accounts."),
        'user_type': fields.many2one('account.account.type', 'Account Type', required=True,
            help="These types are defined according to your country. The type contains more information "\
            "about the account and its specificities."),
        'financial_report_ids': fields.many2many('account.financial.report', 'account_template_financial_report', 'account_template_id', 'report_line_id', 'Financial Reports'),
        'reconcile': fields.boolean('Allow Reconciliation', help="Check this option if you want the user to reconcile entries in this account."),
        'shortcut': fields.char('Shortcut', size=12),
        'note': fields.text('Note'),
        'parent_id': fields.many2one('account.account.template', 'Parent Account Template', ondelete='cascade', domain=[('type','=','view')]),
        'child_parent_ids':fields.one2many('account.account.template', 'parent_id', 'Children'),
        'tax_ids': fields.many2many('account.tax.template', 'account_account_template_tax_rel', 'account_id', 'tax_id', 'Default Taxes'),
        'nocreate': fields.boolean('Optional create', help="If checked, the new chart of accounts will not contain this by default."),
        'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', help="This optional field allow you to link an account template to a specific chart template that may differ from the one its root parent belongs to. This allow you to define chart templates that extend another and complete it with few new accounts (You don't need to define the whole structure that is common to both several times)."),
    }
    _defaults = {
        'reconcile': False,
        'type': 'view',
        'nocreate': False,
    }
    _check_recursion = check_cycle
    _constraints = [
        (_check_recursion, 'Error!\nYou cannot create recursive account templates.', ['parent_id']),
    ]
    def name_get(self, cr, uid, ids, context=None):
        """Display each template as "<code> <name>" (code omitted if empty)."""
        if not ids:
            return []
        reads = self.read(cr, uid, ids, ['name','code'], context=context)
        res = []
        for record in reads:
            name = record['name']
            if record['code']:
                name = record['code']+' '+name
            res.append((record['id'],name ))
        return res
    def generate_account(self, cr, uid, chart_template_id, tax_template_ref, acc_template_ref, code_digits, company_id, context=None):
        """
        This method for generating accounts from templates.

        :param chart_template_id: id of the chart template chosen in the wizard
        :param tax_template_ref: Taxes templates reference for write taxes_id in account_account.
        :param acc_template_ref: dictionary with the mapping between the account templates and the real accounts.
        :param code_digits: number of digits got from wizard.multi.charts.accounts, this is use for account code.
        :param company_id: company_id selected from wizard.multi.charts.accounts.
        :returns: return acc_template_ref for reference purpose.
        :rtype: dict
        """
        if context is None:
            context = {}
        obj_acc = self.pool.get('account.account')
        company_name = self.pool.get('res.company').browse(cr, uid, company_id, context=context).name
        template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
        #deactivate the parent_store functionnality on account_account for rapidity purpose
        ctx = context.copy()
        ctx.update({'defer_parent_store_computation': True})
        level_ref = {}
        # Candidate templates: those attached to this chart template, plus —
        # when a root account is set — descendants of the root that have no
        # chart_template_id of their own.
        children_acc_criteria = [('chart_template_id','=', chart_template_id)]
        if template.account_root_id.id:
            children_acc_criteria = ['|'] + children_acc_criteria + ['&',('parent_id','child_of', [template.account_root_id.id]),('chart_template_id','=', False)]
        children_acc_template = self.search(cr, uid, [('nocreate','!=',True)] + children_acc_criteria, order='id')
        for account_template in self.browse(cr, uid, children_acc_template, context=context):
            # skip the root of COA if it's not the main one
            if (template.account_root_id.id == account_template.id) and template.parent_id:
                continue
            tax_ids = []
            for tax in account_template.tax_ids:
                tax_ids.append(tax_template_ref[tax.id])
            # Pad non-view account codes with trailing zeros up to code_digits.
            code_main = account_template.code and len(account_template.code) or 0
            code_acc = account_template.code or ''
            if code_main > 0 and code_main <= code_digits and account_template.type != 'view':
                code_acc = str(code_acc) + (str('0'*(code_digits-code_main)))
            parent_id = account_template.parent_id and ((account_template.parent_id.id in acc_template_ref) and acc_template_ref[account_template.parent_id.id]) or False
            #the level as to be given as well at the creation time, because of the defer_parent_store_computation in
            #context. Indeed because of this, the parent_left and parent_right are not computed and thus the child_of
            #operator does not return the expected values, with result of having the level field not computed at all.
            if parent_id:
                level = parent_id in level_ref and level_ref[parent_id] + 1 or obj_acc._get_level(cr, uid, [parent_id], 'level', None, context=context)[parent_id] + 1
            else:
                level = 0
            # The chart root takes the company name instead of the template name.
            vals={
                'name': (template.account_root_id.id == account_template.id) and company_name or account_template.name,
                'currency_id': account_template.currency_id and account_template.currency_id.id or False,
                'code': code_acc,
                'type': account_template.type,
                'user_type': account_template.user_type and account_template.user_type.id or False,
                'reconcile': account_template.reconcile,
                'shortcut': account_template.shortcut,
                'note': account_template.note,
                'financial_report_ids': account_template.financial_report_ids and [(6,0,[x.id for x in account_template.financial_report_ids])] or False,
                'parent_id': parent_id,
                'tax_ids': [(6,0,tax_ids)],
                'company_id': company_id,
                'level': level,
            }
            new_account = obj_acc.create(cr, uid, vals, context=ctx)
            acc_template_ref[account_template.id] = new_account
            level_ref[new_account] = level
        #reactivate the parent_store functionnality on account_account
        obj_acc._parent_store_compute(cr)
        return acc_template_ref
class account_add_tmpl_wizard(osv.osv_memory):
    """Add one more account from the template.
    With the 'nocreate' option, some accounts may not be created. Use this to add them later."""
    _name = 'account.addtmpl.wizard'
    def _get_def_cparent(self, cr, uid, context=None):
        # Default parent: the real account whose code matches the code of
        # the chosen template's parent template (context['tmpl_ids']).
        acc_obj = self.pool.get('account.account')
        tmpl_obj = self.pool.get('account.account.template')
        tmpl_data = tmpl_obj.read(cr, uid, [context['tmpl_ids']], ['parent_id'])
        if not (tmpl_data and tmpl_data[0]['parent_id']):
            return False
        parent_data = tmpl_obj.read(cr, uid, [tmpl_data[0]['parent_id'][0]], ['code'])
        if not (parent_data and parent_data[0]['code']):
            raise osv.except_osv(_('Error!'), _('There is no parent code for the template account.'))
        matches = acc_obj.search(cr, uid, [('code','=',parent_data[0]['code'])])
        if matches:
            return matches[0]
        return False
    _columns = {
        'cparent_id':fields.many2one('account.account', 'Parent target', help="Creates an account with the selected template under this existing parent.", required=True),
    }
    _defaults = {
        'cparent_id': _get_def_cparent,
    }
    def action_create(self,cr,uid,ids,context=None):
        # Instantiate the template (context['tmpl_ids']) as a real account
        # under the chosen parent, inside the parent's company.
        if context is None:
            context = {}
        acc_obj = self.pool.get('account.account')
        tmpl_obj = self.pool.get('account.account.template')
        data = self.read(cr, uid, ids)[0]
        parent_account_id = data['cparent_id'][0]
        company_id = acc_obj.read(cr, uid, [parent_account_id], ['company_id'])[0]['company_id'][0]
        account_template = tmpl_obj.browse(cr, uid, context['tmpl_ids'])
        acc_obj.create(cr, uid, {
            'name': account_template.name,
            'currency_id': account_template.currency_id and account_template.currency_id.id or False,
            'code': account_template.code,
            'type': account_template.type,
            'user_type': account_template.user_type and account_template.user_type.id or False,
            'reconcile': account_template.reconcile,
            'shortcut': account_template.shortcut,
            'note': account_template.note,
            'parent_id': parent_account_id,
            'company_id': company_id,
        })
        return {'type':'state', 'state': 'end' }
    def action_cancel(self, cr, uid, ids, context=None):
        # Close the wizard without creating anything.
        return { 'type': 'state', 'state': 'end' }
class account_tax_code_template(osv.osv):
    """Template for tax codes (the tax-declaration grid).

    Real per-company account.tax.code records are created from these
    templates by ``generate_tax_code``.
    """
    _name = 'account.tax.code.template'
    _description = 'Tax Code Template'
    # Fix: _order was assigned twice in this class ('code' near the top,
    # then 'code,name' at the bottom); only the last assignment was
    # effective, so the shadowed one has been removed.
    _order = 'code,name'
    _rec_name = 'code'
    _columns = {
        'name': fields.char('Tax Case Name', size=64, required=True),
        'code': fields.char('Case Code', size=64),
        'info': fields.text('Description'),
        'parent_id': fields.many2one('account.tax.code.template', 'Parent Code', select=True),
        'child_ids': fields.one2many('account.tax.code.template', 'parent_id', 'Child Codes'),
        'sign': fields.float('Sign For Parent', required=True),
        'notprintable':fields.boolean("Not Printable in Invoice", help="Check this box if you don't want any tax related to this tax Code to appear on invoices."),
    }
    _defaults = {
        'sign': 1.0,
        'notprintable': False,
    }
    def generate_tax_code(self, cr, uid, tax_code_root_id, company_id, context=None):
        '''
        This function generates the tax codes from the templates of tax code that are children of the given one passed
        in argument. Then it returns a dictionary with the mapping between the templates and the real objects.

        :param tax_code_root_id: id of the root of all the tax code templates to process
        :param company_id: id of the company the wizard is running for
        :returns: dictionary with the mapping between the templates and the real objects.
        :rtype: dict
        '''
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_tax_code = self.pool.get('account.tax.code')
        tax_code_template_ref = {}
        company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
        #find all the children of the tax_code_root_id
        children_tax_code_template = tax_code_root_id and obj_tax_code_template.search(cr, uid, [('parent_id','child_of',[tax_code_root_id])], order='id') or []
        for tax_code_template in obj_tax_code_template.browse(cr, uid, children_tax_code_template, context=context):
            # The root tax code takes the company name; parents are remapped
            # through the refs built so far (parents come first: ordered by id).
            vals = {
                'name': (tax_code_root_id == tax_code_template.id) and company.name or tax_code_template.name,
                'code': tax_code_template.code,
                'info': tax_code_template.info,
                'parent_id': tax_code_template.parent_id and ((tax_code_template.parent_id.id in tax_code_template_ref) and tax_code_template_ref[tax_code_template.parent_id.id]) or False,
                'company_id': company_id,
                'sign': tax_code_template.sign,
            }
            #check if this tax code already exists
            rec_list = obj_tax_code.search(cr, uid, [('name', '=', vals['name']),('code', '=', vals['code']),('company_id', '=', vals['company_id'])], context=context)
            if not rec_list:
                #if not yet, create it
                new_tax_code = obj_tax_code.create(cr, uid, vals)
                #recording the new tax code to do the mapping
                tax_code_template_ref[tax_code_template.id] = new_tax_code
        return tax_code_template_ref
    def name_get(self, cr, uid, ids, context=None):
        """Display each template as "<code> - <name>" (code part omitted if empty)."""
        if not ids:
            return []
        if isinstance(ids, (int, long)):
            ids = [ids]
        reads = self.read(cr, uid, ids, ['name','code'], context, load='_classic_write')
        return [(x['id'], (x['code'] and x['code'] + ' - ' or '') + x['name']) \
                for x in reads]
    _check_recursion = check_cycle
    _constraints = [
        (_check_recursion, 'Error!\nYou cannot create recursive Tax Codes.', ['parent_id'])
    ]
class account_chart_template(osv.osv):
    # Describes a full chart of accounts to instantiate for a company:
    # root account/tax-code templates, tax templates, and the default
    # property accounts used when generating the real chart.
    _name="account.chart.template"
    _description= "Templates for Account Chart"
    _columns={
        'name': fields.char('Name', size=64, required=True),
        'parent_id': fields.many2one('account.chart.template', 'Parent Chart Template'),
        'code_digits': fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
        'visible': fields.boolean('Can be Visible?', help="Set this to False if you don't want this template to be used actively in the wizard that generate Chart of Accounts from templates, this is useful when you want to generate accounts of this template only when loading its child template."),
        'currency_id': fields.many2one('res.currency', 'Currency'),
        'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sale and purchase rates or choose from list of taxes. This last choice assumes that the set of tax defined on this template is complete'),
        'account_root_id': fields.many2one('account.account.template', 'Root Account', domain=[('parent_id','=',False)]),
        'tax_code_root_id': fields.many2one('account.tax.code.template', 'Root Tax Code', domain=[('parent_id','=',False)]),
        'tax_template_ids': fields.one2many('account.tax.template', 'chart_template_id', 'Tax Template List', help='List of all the taxes that have to be installed by the wizard'),
        'bank_account_view_id': fields.many2one('account.account.template', 'Bank Account'),
        'property_account_receivable': fields.many2one('account.account.template', 'Receivable Account'),
        'property_account_payable': fields.many2one('account.account.template', 'Payable Account'),
        'property_account_expense_categ': fields.many2one('account.account.template', 'Expense Category Account'),
        'property_account_income_categ': fields.many2one('account.account.template', 'Income Category Account'),
        'property_account_expense': fields.many2one('account.account.template', 'Expense Account on Product Template'),
        'property_account_income': fields.many2one('account.account.template', 'Income Account on Product Template'),
        'property_account_income_opening': fields.many2one('account.account.template', 'Opening Entries Income Account'),
        'property_account_expense_opening': fields.many2one('account.account.template', 'Opening Entries Expense Account'),
    }
    _defaults = {
        'visible': True,
        'code_digits': 6,
        'complete_tax_set': True,
    }
class account_tax_template(osv.osv):
    # Full definition of the tax template model (forward-declared earlier
    # in this file). Real per-company account.tax records are created from
    # these templates by _generate_tax().
    _name = 'account.tax.template'
    _description = 'Templates for Taxes'
    _columns = {
        'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
        'name': fields.char('Tax Name', size=64, required=True),
        'sequence': fields.integer('Sequence', required=True, help="The sequence field is used to order the taxes lines from lower sequences to higher ones. The order is important if you have a tax that has several tax children. In this case, the evaluation order is important."),
        'amount': fields.float('Amount', required=True, digits_compute=get_precision_tax(), help="For Tax Type percent enter % ratio between 0-1."),
        'type': fields.selection( [('percent','Percent'), ('fixed','Fixed'), ('none','None'), ('code','Python Code'), ('balance','Balance')], 'Tax Type', required=True),
        'applicable_type': fields.selection( [('true','True'), ('code','Python Code')], 'Applicable Type', required=True, help="If not applicable (computed through a Python code), the tax won't appear on the invoice."),
        'domain':fields.char('Domain', size=32, help="This field is only used if you develop your own module allowing developers to create specific taxes in a custom domain."),
        'account_collected_id':fields.many2one('account.account.template', 'Invoice Tax Account'),
        'account_paid_id':fields.many2one('account.account.template', 'Refund Tax Account'),
        'parent_id':fields.many2one('account.tax.template', 'Parent Tax Account', select=True),
        'child_depend':fields.boolean('Tax on Children', help="Set if the tax computation is based on the computation of child taxes rather than on the total amount."),
        'python_compute':fields.text('Python Code'),
        'python_compute_inv':fields.text('Python Code (reverse)'),
        'python_applicable':fields.text('Applicable Code'),
        #
        # Fields used for the Tax declaration
        #
        'base_code_id': fields.many2one('account.tax.code.template', 'Base Code', help="Use this code for the tax declaration."),
        'tax_code_id': fields.many2one('account.tax.code.template', 'Tax Code', help="Use this code for the tax declaration."),
        'base_sign': fields.float('Base Code Sign', help="Usually 1 or -1."),
        'tax_sign': fields.float('Tax Code Sign', help="Usually 1 or -1."),
        # Same fields for refund invoices
        'ref_base_code_id': fields.many2one('account.tax.code.template', 'Refund Base Code', help="Use this code for the tax declaration."),
        'ref_tax_code_id': fields.many2one('account.tax.code.template', 'Refund Tax Code', help="Use this code for the tax declaration."),
        'ref_base_sign': fields.float('Refund Base Code Sign', help="Usually 1 or -1."),
        'ref_tax_sign': fields.float('Refund Tax Code Sign', help="Usually 1 or -1."),
        'include_base_amount': fields.boolean('Include in Base Amount', help="Set if the amount of tax must be included in the base amount before computing the next taxes."),
        'description': fields.char('Internal Name'),
        'type_tax_use': fields.selection([('sale','Sale'),('purchase','Purchase'),('all','All')], 'Tax Use In', required=True,),
        'price_include': fields.boolean('Tax Included in Price', help="Check this if the price you use on the product and invoices includes this tax."),
    }
    def name_get(self, cr, uid, ids, context=None):
        """Display the internal name (description) when set, the label otherwise."""
        if not ids:
            return []
        res = []
        for record in self.read(cr, uid, ids, ['description','name'], context=context):
            name = record['description'] and record['description'] or record['name']
            res.append((record['id'],name ))
        return res
    def _default_company(self, cr, uid, context=None):
        # Company of the current user, falling back to the first root company.
        # NOTE(review): this helper is not referenced from _defaults below —
        # confirm whether it is used elsewhere before relying on it.
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        if user.company_id:
            return user.company_id.id
        return self.pool.get('res.company').search(cr, uid, [('parent_id', '=', False)])[0]
    _defaults = {
        'python_compute': lambda *a: '''# price_unit\n# product: product.product object or None\n# partner: res.partner object or None\n\nresult = price_unit * 0.10''',
        'python_compute_inv': lambda *a: '''# price_unit\n# product: product.product object or False\n\nresult = price_unit * 0.10''',
        'applicable_type': 'true',
        'type': 'percent',
        'amount': 0,
        'sequence': 1,
        'ref_tax_sign': 1,
        'ref_base_sign': 1,
        'tax_sign': 1,
        'base_sign': 1,
        'include_base_amount': False,
        'type_tax_use': 'all',
        'price_include': 0,
    }
    _order = 'sequence'
    def _generate_tax(self, cr, uid, tax_templates, tax_code_template_ref, company_id, context=None):
        """
        This method generate taxes from templates.

        :param tax_templates: list of browse record of the tax templates to process
        :param tax_code_template_ref: Taxcode templates reference.
        :param company_id: id of the company the wizard is running for
        :returns:
            {
            'tax_template_to_tax': mapping between tax template and the newly generated taxes corresponding,
            'account_dict': dictionary containing a to-do list with all the accounts to assign on new taxes
            }
        """
        if context is None:
            context = {}
        res = {}
        todo_dict = {}
        tax_template_to_tax = {}
        for tax in tax_templates:
            # Parent taxes and tax codes are remapped from template ids to
            # the real ids created so far (tax_template_to_tax /
            # tax_code_template_ref).
            vals_tax = {
                'name':tax.name,
                'sequence': tax.sequence,
                'amount': tax.amount,
                'type': tax.type,
                'applicable_type': tax.applicable_type,
                'domain': tax.domain,
                'parent_id': tax.parent_id and ((tax.parent_id.id in tax_template_to_tax) and tax_template_to_tax[tax.parent_id.id]) or False,
                'child_depend': tax.child_depend,
                'python_compute': tax.python_compute,
                'python_compute_inv': tax.python_compute_inv,
                'python_applicable': tax.python_applicable,
                'base_code_id': tax.base_code_id and ((tax.base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.base_code_id.id]) or False,
                'tax_code_id': tax.tax_code_id and ((tax.tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.tax_code_id.id]) or False,
                'base_sign': tax.base_sign,
                'tax_sign': tax.tax_sign,
                'ref_base_code_id': tax.ref_base_code_id and ((tax.ref_base_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_base_code_id.id]) or False,
                'ref_tax_code_id': tax.ref_tax_code_id and ((tax.ref_tax_code_id.id in tax_code_template_ref) and tax_code_template_ref[tax.ref_tax_code_id.id]) or False,
                'ref_base_sign': tax.ref_base_sign,
                'ref_tax_sign': tax.ref_tax_sign,
                'include_base_amount': tax.include_base_amount,
                'description': tax.description,
                'company_id': company_id,
                'type_tax_use': tax.type_tax_use,
                'price_include': tax.price_include
            }
            new_tax = self.pool.get('account.tax').create(cr, uid, vals_tax)
            tax_template_to_tax[tax.id] = new_tax
            #as the accounts have not been created yet, we have to wait before filling these fields
            todo_dict[new_tax] = {
                'account_collected_id': tax.account_collected_id and tax.account_collected_id.id or False,
                'account_paid_id': tax.account_paid_id and tax.account_paid_id.id or False,
            }
        res.update({'tax_template_to_tax': tax_template_to_tax, 'account_dict': todo_dict})
        return res
# Fiscal Position Templates
class account_fiscal_position_template(osv.osv):
    # Fiscal position template: per-chart-template mapping rules that are
    # instantiated into real account.fiscal.position records per company.
    _name = 'account.fiscal.position.template'
    _description = 'Template for Fiscal Position'
    _columns = {
        'name': fields.char('Fiscal Position Template', size=64, required=True),
        'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
        'account_ids': fields.one2many('account.fiscal.position.account.template', 'position_id', 'Account Mapping'),
        'tax_ids': fields.one2many('account.fiscal.position.tax.template', 'position_id', 'Tax Mapping'),
        'note': fields.text('Notes', translate=True),
    }
    def generate_fiscal_position(self, cr, uid, chart_temp_id, tax_template_ref, acc_template_ref, company_id, context=None):
        """
        This method generate Fiscal Position, Fiscal Position Accounts and Fiscal Position Taxes from templates.

        :param chart_temp_id: Chart Template Id.
        :param tax_template_ref: Taxes templates reference for generating account.fiscal.position.tax.
        :param acc_template_ref: Account templates reference for generating account.fiscal.position.account.
        :param company_id: company_id selected from wizard.multi.charts.accounts.
        :returns: True
        """
        if context is None:
            context = {}
        obj_tax_fp = self.pool.get('account.fiscal.position.tax')
        obj_ac_fp = self.pool.get('account.fiscal.position.account')
        obj_fiscal_position = self.pool.get('account.fiscal.position')
        fp_ids = self.search(cr, uid, [('chart_template_id', '=', chart_temp_id)])
        for position in self.browse(cr, uid, fp_ids, context=context):
            new_fp = obj_fiscal_position.create(cr, uid, {'company_id': company_id, 'name': position.name, 'note': position.note})
            # Remap every tax/account pair from template ids to real ids.
            for tax in position.tax_ids:
                obj_tax_fp.create(cr, uid, {
                    'tax_src_id': tax_template_ref[tax.tax_src_id.id],
                    'tax_dest_id': tax.tax_dest_id and tax_template_ref[tax.tax_dest_id.id] or False,
                    'position_id': new_fp
                })
            for acc in position.account_ids:
                obj_ac_fp.create(cr, uid, {
                    'account_src_id': acc_template_ref[acc.account_src_id.id],
                    'account_dest_id': acc_template_ref[acc.account_dest_id.id],
                    'position_id': new_fp
                })
        return True
class account_fiscal_position_tax_template(osv.osv):
    # One tax-substitution rule of a fiscal position template:
    # tax_src_id is replaced by tax_dest_id (or removed when empty).
    _name = 'account.fiscal.position.tax.template'
    _description = 'Template Tax Fiscal Position'
    _rec_name = 'position_id'
    _columns = {
        'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Position', required=True, ondelete='cascade'),
        'tax_src_id': fields.many2one('account.tax.template', 'Tax Source', required=True),
        'tax_dest_id': fields.many2one('account.tax.template', 'Replacement Tax')
    }
class account_fiscal_position_account_template(osv.osv):
    # One account-substitution rule of a fiscal position template:
    # account_src_id is replaced by account_dest_id (view accounts excluded).
    _name = 'account.fiscal.position.account.template'
    _description = 'Template Account Fiscal Mapping'
    _rec_name = 'position_id'
    _columns = {
        'position_id': fields.many2one('account.fiscal.position.template', 'Fiscal Mapping', required=True, ondelete='cascade'),
        'account_src_id': fields.many2one('account.account.template', 'Account Source', domain=[('type','<>','view')], required=True),
        'account_dest_id': fields.many2one('account.account.template', 'Account Destination', domain=[('type','<>','view')], required=True)
    }
# ---------------------------------------------------------
# Account generation from template wizards
# ---------------------------------------------------------
class wizard_multi_charts_accounts(osv.osv_memory):
    """
    Create a new account chart for a company.
    Wizards ask for:
        * a company
        * an account chart template
        * a number of digits for formatting code of non-view accounts
        * a list of bank accounts owned by the company
    Then, the wizard:
        * generates all accounts from the template and assigns them to the right company
        * generates all taxes and tax codes, changing account assignations
        * generates all accounting properties and assigns them correctly
    """
    _name='wizard.multi.charts.accounts'
    _inherit = 'res.config'
    _columns = {
        'company_id':fields.many2one('res.company', 'Company', required=True),
        'currency_id': fields.many2one('res.currency', 'Currency', help="Currency as per company's country."),
        'only_one_chart_template': fields.boolean('Only One Chart Template Available'),
        'chart_template_id': fields.many2one('account.chart.template', 'Chart Template', required=True),
        'bank_accounts_id': fields.one2many('account.bank.accounts.wizard', 'bank_account_id', 'Cash and Banks', required=True),
        'code_digits':fields.integer('# of Digits', required=True, help="No. of Digits to use for account code"),
        # Used when the chart template ships a complete tax set:
        "sale_tax": fields.many2one("account.tax.template", "Default Sale Tax"),
        "purchase_tax": fields.many2one("account.tax.template", "Default Purchase Tax"),
        # Used instead of the two m2o fields above when it does not:
        'sale_tax_rate': fields.float('Sales Tax(%)'),
        'purchase_tax_rate': fields.float('Purchase Tax(%)'),
        'complete_tax_set': fields.boolean('Complete Set of Taxes', help='This boolean helps you to choose if you want to propose to the user to encode the sales and purchase rates or use the usual m2o fields. This last choice assumes that the set of tax defined for the chosen template is complete'),
    }
def _get_chart_parent_ids(self, cr, uid, chart_template, context=None):
""" Returns the IDs of all ancestor charts, including the chart itself.
(inverse of child_of operator)
:param browse_record chart_template: the account.chart.template record
:return: the IDS of all ancestor charts, including the chart itself.
"""
result = [chart_template.id]
while chart_template.parent_id:
chart_template = chart_template.parent_id
result.append(chart_template.id)
return result
def onchange_tax_rate(self, cr, uid, ids, rate=False, context=None):
return {'value': {'purchase_tax_rate': rate or False}}
def onchange_chart_template_id(self, cr, uid, ids, chart_template_id=False, context=None):
res = {}
tax_templ_obj = self.pool.get('account.tax.template')
res['value'] = {'complete_tax_set': False, 'sale_tax': False, 'purchase_tax': False}
if chart_template_id:
data = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
currency_id = data.currency_id and data.currency_id.id or self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
res['value'].update({'complete_tax_set': data.complete_tax_set, 'currency_id': currency_id})
if data.complete_tax_set:
# default tax is given by the lowest sequence. For same sequence we will take the latest created as it will be the case for tax created while isntalling the generic chart of account
chart_ids = self._get_chart_parent_ids(cr, uid, data, context=context)
base_tax_domain = [("chart_template_id", "in", chart_ids), ('parent_id', '=', False)]
sale_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('sale','all'))]
purchase_tax_domain = base_tax_domain + [('type_tax_use', 'in', ('purchase','all'))]
sale_tax_ids = tax_templ_obj.search(cr, uid, sale_tax_domain, order="sequence, id desc")
purchase_tax_ids = tax_templ_obj.search(cr, uid, purchase_tax_domain, order="sequence, id desc")
res['value'].update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False,
'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
res.setdefault('domain', {})
res['domain']['sale_tax'] = repr(sale_tax_domain)
res['domain']['purchase_tax'] = repr(purchase_tax_domain)
if data.code_digits:
res['value'].update({'code_digits': data.code_digits})
return res
    def default_get(self, cr, uid, fields, context=None):
        """Compute wizard defaults: one Cash and one Bank account line, the
        current user's company, its currency, the most recently created
        visible chart template and that template's default sale/purchase
        taxes (plus hard-coded 15% fallback rates).
        """
        res = super(wizard_multi_charts_accounts, self).default_get(cr, uid, fields, context=context)
        tax_templ_obj = self.pool.get('account.tax.template')
        account_chart_template = self.pool['account.chart.template']
        if 'bank_accounts_id' in fields:
            # propose one cash and one bank journal line by default
            res.update({'bank_accounts_id': [{'acc_name': _('Cash'), 'account_type': 'cash'},{'acc_name': _('Bank'), 'account_type': 'bank'}]})
        if 'company_id' in fields:
            res.update({'company_id': self.pool.get('res.users').browse(cr, uid, [uid], context=context)[0].company_id.id})
        if 'currency_id' in fields:
            company_id = res.get('company_id') or False
            if company_id:
                # deduce the currency from the company's country
                company_obj = self.pool.get('res.company')
                country_id = company_obj.browse(cr, uid, company_id, context=context).country_id.id
                currency_id = company_obj.on_change_country(cr, uid, company_id, country_id, context=context)['value']['currency_id']
                res.update({'currency_id': currency_id})
        ids = account_chart_template.search(cr, uid, [('visible', '=', True)], context=context)
        if ids:
            # in order to set default chart which was last created set max of ids.
            chart_id = max(ids)
            # NOTE(review): assumes context is a dict here (the default None
            # would crash on .get) -- callers appear to always pass one
            if context.get("default_charts"):
                # a specific chart module was requested through the context
                model_data = self.pool.get('ir.model.data').search_read(cr, uid, [('model','=','account.chart.template'),('module','=',context.get("default_charts"))], ['res_id'], context=context)
                if model_data:
                    chart_id = model_data[0]['res_id']
            chart = account_chart_template.browse(cr, uid, chart_id, context=context)
            chart_hierarchy_ids = self._get_chart_parent_ids(cr, uid, chart, context=context)
            if 'chart_template_id' in fields:
                res.update({'only_one_chart_template': len(ids) == 1,
                            'chart_template_id': chart_id})
            if 'sale_tax' in fields:
                # default sale tax: lowest sequence within the chart hierarchy
                sale_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
                                                              ('type_tax_use', 'in', ('sale','all'))],
                                                    order="sequence")
                res.update({'sale_tax': sale_tax_ids and sale_tax_ids[0] or False})
            if 'purchase_tax' in fields:
                # default purchase tax: lowest sequence within the chart hierarchy
                purchase_tax_ids = tax_templ_obj.search(cr, uid, [("chart_template_id", "in", chart_hierarchy_ids),
                                                                  ('type_tax_use', 'in', ('purchase','all'))],
                                                        order="sequence")
                res.update({'purchase_tax': purchase_tax_ids and purchase_tax_ids[0] or False})
        res.update({
            'purchase_tax_rate': 15.0,
            'sale_tax_rate': 15.0,
        })
        return res
    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Override: restrict the company selector to companies that do not
        have a chart of accounts installed yet."""
        if context is None:context = {}
        res = super(wizard_multi_charts_accounts, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        cmp_select = []
        acc_template_obj = self.pool.get('account.chart.template')
        company_obj = self.pool.get('res.company')
        company_ids = company_obj.search(cr, uid, [], context=context)
        # display in the widget selection of companies, only the companies that
        # haven't been configured yet (but don't care about the demo chart of
        # accounts)
        cr.execute("SELECT company_id FROM account_account WHERE active = 't' AND account_account.parent_id IS NULL AND name != %s", ("Chart For Automated Tests",))
        configured_cmp = [r[0] for r in cr.fetchall()]
        unconfigured_cmp = list(set(company_ids)-set(configured_cmp))
        for field in res['fields']:
            if field == 'company_id':
                res['fields'][field]['domain'] = [('id','in',unconfigured_cmp)]
                # empty selection by default; filled below if anything remains
                res['fields'][field]['selection'] = [('', '')]
                if unconfigured_cmp:
                    cmp_select = [(line.id, line.name) for line in company_obj.browse(cr, uid, unconfigured_cmp)]
                    res['fields'][field]['selection'] = cmp_select
        return res
def check_created_journals(self, cr, uid, vals_journal, company_id, context=None):
"""
This method used for checking journals already created or not. If not then create new journal.
"""
obj_journal = self.pool.get('account.journal')
rec_list = obj_journal.search(cr, uid, [('name','=', vals_journal['name']),('company_id', '=', company_id)], context=context)
if not rec_list:
obj_journal.create(cr, uid, vals_journal, context=context)
return True
def generate_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method is used for creating journals.
:param chart_temp_id: Chart Template Id.
:param acc_template_ref: Account templates reference.
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
journal_data = self._prepare_all_journals(cr, uid, chart_template_id, acc_template_ref, company_id, context=context)
for vals_journal in journal_data:
self.check_created_journals(cr, uid, vals_journal, company_id, context=context)
return True
    def _prepare_all_journals(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
        """Build the creation values for the six default journals: sale,
        purchase, their refund counterparts, miscellaneous and opening
        entries.

        :param chart_template_id: id of the chart template being installed.
        :param acc_template_ref: mapping account template id -> real account id.
        :param company_id: company the journals are created for.
        :returns: list of dicts suitable for account.journal create().
        """
        def _get_analytic_journal(journal_type):
            # Get the analytic journal matching the journal type, if installed
            data = False
            try:
                if journal_type in ('sale', 'sale_refund'):
                    data = obj_data.get_object_reference(cr, uid, 'account', 'analytic_journal_sale')
                elif journal_type in ('purchase', 'purchase_refund'):
                    data = obj_data.get_object_reference(cr, uid, 'account', 'exp')
                elif journal_type == 'general':
                    pass
            except ValueError:
                # the referenced analytic journal record does not exist
                pass
            return data and data[1] or False
        def _get_default_account(journal_type, type='debit'):
            # Get the default debit/credit account generated from the template
            default_account = False
            if journal_type in ('sale', 'sale_refund'):
                default_account = acc_template_ref.get(template.property_account_income_categ.id)
            elif journal_type in ('purchase', 'purchase_refund'):
                default_account = acc_template_ref.get(template.property_account_expense_categ.id)
            elif journal_type == 'situation':
                # opening entries journal: debit/credit sides use the
                # dedicated opening accounts
                if type == 'debit':
                    default_account = acc_template_ref.get(template.property_account_expense_opening.id)
                else:
                    default_account = acc_template_ref.get(template.property_account_income_opening.id)
            return default_account
        journal_names = {
            'sale': _('Sales Journal'),
            'purchase': _('Purchase Journal'),
            'sale_refund': _('Sales Refund Journal'),
            'purchase_refund': _('Purchase Refund Journal'),
            'general': _('Miscellaneous Journal'),
            'situation': _('Opening Entries Journal'),
        }
        journal_codes = {
            'sale': _('SAJ'),
            'purchase': _('EXJ'),
            'sale_refund': _('SCNJ'),
            'purchase_refund': _('ECNJ'),
            'general': _('MISC'),
            'situation': _('OPEJ'),
        }
        obj_data = self.pool.get('ir.model.data')
        analytic_journal_obj = self.pool.get('account.analytic.journal')
        template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
        journal_data = []
        for journal_type in ['sale', 'purchase', 'sale_refund', 'purchase_refund', 'general', 'situation']:
            vals = {
                'type': journal_type,
                'name': journal_names[journal_type],
                'code': journal_codes[journal_type],
                'company_id': company_id,
                # only the opening entries journal is centralised
                'centralisation': journal_type == 'situation',
                'analytic_journal_id': _get_analytic_journal(journal_type),
                'default_credit_account_id': _get_default_account(journal_type, 'credit'),
                'default_debit_account_id': _get_default_account(journal_type, 'debit'),
            }
            journal_data.append(vals)
        return journal_data
def generate_properties(self, cr, uid, chart_template_id, acc_template_ref, company_id, context=None):
"""
This method used for creating properties.
:param chart_template_id: id of the current chart template for which we need to create properties
:param acc_template_ref: Mapping between ids of account templates and real accounts created from them
:param company_id: company_id selected from wizard.multi.charts.accounts.
:returns: True
"""
property_obj = self.pool.get('ir.property')
field_obj = self.pool.get('ir.model.fields')
todo_list = [
('property_account_receivable','res.partner','account.account'),
('property_account_payable','res.partner','account.account'),
('property_account_expense_categ','product.category','account.account'),
('property_account_income_categ','product.category','account.account'),
('property_account_expense','product.template','account.account'),
('property_account_income','product.template','account.account'),
]
template = self.pool.get('account.chart.template').browse(cr, uid, chart_template_id, context=context)
for record in todo_list:
account = getattr(template, record[0])
value = account and 'account.account,' + str(acc_template_ref[account.id]) or False
if value:
field = field_obj.search(cr, uid, [('name', '=', record[0]),('model', '=', record[1]),('relation', '=', record[2])], context=context)
vals = {
'name': record[0],
'company_id': company_id,
'fields_id': field[0],
'value': value,
}
property_ids = property_obj.search(cr, uid, [('name','=', record[0]),('company_id', '=', company_id)], context=context)
if property_ids:
#the property exist: modify it
property_obj.write(cr, uid, property_ids, vals, context=context)
else:
#create the property
property_obj.create(cr, uid, vals, context=context)
return True
def _install_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, acc_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
'''
This function recursively loads the template objects and create the real objects from them.
:param template_id: id of the chart template to load
:param company_id: id of the company the wizard is running for
:param code_digits: integer that depicts the number of digits the accounts code should have in the COA
:param obj_wizard: the current wizard for generating the COA from the templates
:param acc_ref: Mapping between ids of account templates and real accounts created from them
:param taxes_ref: Mapping between ids of tax templates and real taxes created from them
:param tax_code_ref: Mapping between ids of tax code templates and real tax codes created from them
:returns: return a tuple with a dictionary containing
* the mapping between the account template ids and the ids of the real accounts that have been generated
from them, as first item,
* a similar dictionary for mapping the tax templates and taxes, as second item,
* a last identical containing the mapping of tax code templates and tax codes
:rtype: tuple(dict, dict, dict)
'''
if acc_ref is None:
acc_ref = {}
if taxes_ref is None:
taxes_ref = {}
if tax_code_ref is None:
tax_code_ref = {}
template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
if template.parent_id:
tmp1, tmp2, tmp3 = self._install_template(cr, uid, template.parent_id.id, company_id, code_digits=code_digits, acc_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
tmp1, tmp2, tmp3 = self._load_template(cr, uid, template_id, company_id, code_digits=code_digits, obj_wizard=obj_wizard, account_ref=acc_ref, taxes_ref=taxes_ref, tax_code_ref=tax_code_ref, context=context)
acc_ref.update(tmp1)
taxes_ref.update(tmp2)
tax_code_ref.update(tmp3)
return acc_ref, taxes_ref, tax_code_ref
    def _load_template(self, cr, uid, template_id, company_id, code_digits=None, obj_wizard=None, account_ref=None, taxes_ref=None, tax_code_ref=None, context=None):
        '''
        Generate all the real objects (tax codes, taxes, accounts, journals,
        properties, fiscal positions) from one chart template.

        :param template_id: id of the chart template to load
        :param company_id: id of the company the wizard is running for
        :param code_digits: number of digits the account codes should have
        :param obj_wizard: the current wizard generating the COA
        :param account_ref: mapping account template id -> generated account id
        :param taxes_ref: mapping tax template id -> generated tax id
        :param tax_code_ref: mapping tax code template id -> generated tax code id
        :returns: a tuple with
            * the mapping between the account template ids and the ids of the
              real accounts generated from them, as first item,
            * a similar dictionary mapping tax templates to taxes, as second item,
            * an identical one mapping tax code templates to tax codes
        :rtype: tuple(dict, dict, dict)
        '''
        if account_ref is None:
            account_ref = {}
        if taxes_ref is None:
            taxes_ref = {}
        if tax_code_ref is None:
            tax_code_ref = {}
        template = self.pool.get('account.chart.template').browse(cr, uid, template_id, context=context)
        obj_tax_code_template = self.pool.get('account.tax.code.template')
        obj_acc_tax = self.pool.get('account.tax')
        obj_tax_temp = self.pool.get('account.tax.template')
        obj_acc_template = self.pool.get('account.account.template')
        obj_fiscal_position_template = self.pool.get('account.fiscal.position.template')
        # create all the tax codes first (taxes reference them)
        tax_code_ref.update(obj_tax_code_template.generate_tax_code(cr, uid, template.tax_code_root_id.id, company_id, context=context))
        # Generate taxes from templates.
        tax_templates = [x for x in template.tax_template_ids]
        generated_tax_res = obj_tax_temp._generate_tax(cr, uid, tax_templates, tax_code_ref, company_id, context=context)
        taxes_ref.update(generated_tax_res['tax_template_to_tax'])
        # Generating Accounts from templates.
        account_template_ref = obj_acc_template.generate_account(cr, uid, template_id, taxes_ref, account_ref, code_digits, company_id, context=context)
        account_ref.update(account_template_ref)
        # writing account values on tax after creation of accounts
        # (the taxes were created before the accounts, so the collected/paid
        # accounts could not be set at tax-creation time)
        for key,value in generated_tax_res['account_dict'].items():
            if value['account_collected_id'] or value['account_paid_id']:
                obj_acc_tax.write(cr, uid, [key], {
                    'account_collected_id': account_ref.get(value['account_collected_id'], False),
                    'account_paid_id': account_ref.get(value['account_paid_id'], False),
                })
        # Create Journals
        self.generate_journals(cr, uid, template_id, account_ref, company_id, context=context)
        # generate properties function
        self.generate_properties(cr, uid, template_id, account_ref, company_id, context=context)
        # Generate Fiscal Position , Fiscal Position Accounts and Fiscal Position Taxes from templates
        obj_fiscal_position_template.generate_fiscal_position(cr, uid, template_id, taxes_ref, account_ref, company_id, context=context)
        return account_ref, taxes_ref, tax_code_ref
def _create_tax_templates_from_rates(self, cr, uid, obj_wizard, company_id, context=None):
'''
This function checks if the chosen chart template is configured as containing a full set of taxes, and if
it's not the case, it creates the templates for account.tax.code and for account.account.tax objects accordingly
to the provided sale/purchase rates. Then it saves the new tax templates as default taxes to use for this chart
template.
:param obj_wizard: browse record of wizard to generate COA from templates
:param company_id: id of the company for wich the wizard is running
:return: True
'''
obj_tax_code_template = self.pool.get('account.tax.code.template')
obj_tax_temp = self.pool.get('account.tax.template')
chart_template = obj_wizard.chart_template_id
vals = {}
all_parents = self._get_chart_parent_ids(cr, uid, chart_template, context=context)
# create tax templates and tax code templates from purchase_tax_rate and sale_tax_rate fields
if not chart_template.complete_tax_set:
value = obj_wizard.sale_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('sale','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Tax %.2f%%') % value})
value = obj_wizard.purchase_tax_rate
ref_tax_ids = obj_tax_temp.search(cr, uid, [('type_tax_use','in', ('purchase','all')), ('chart_template_id', 'in', all_parents)], context=context, order="sequence, id desc", limit=1)
obj_tax_temp.write(cr, uid, ref_tax_ids, {'amount': value/100.0, 'name': _('Purchase Tax %.2f%%') % value})
return True
    def execute(self, cr, uid, ids, context=None):
        '''
        Called at the confirmation of the wizard to generate the COA from the
        templates: reads all the provided information to create the accounts,
        the banks, the journals, the taxes, the tax codes and the accounting
        properties for the chosen company.

        :raises AccessError: when the user is not an ERP manager (admin).
        '''
        if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
            raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
        obj_data = self.pool.get('ir.model.data')
        ir_values_obj = self.pool.get('ir.values')
        obj_wizard = self.browse(cr, uid, ids[0])
        company_id = obj_wizard.company_id.id
        self.pool.get('res.company').write(cr, uid, [company_id], {'currency_id': obj_wizard.currency_id.id})
        # When we install the CoA of first company, set the currency to price types and pricelists
        if company_id==1:
            for ref in (('product','list_price'),('product','standard_price'),('product','list0'),('purchase','list0')):
                try:
                    tmp2 = obj_data.get_object_reference(cr, uid, *ref)
                    if tmp2:
                        self.pool[tmp2[0]].write(cr, uid, tmp2[1], {
                            'currency_id': obj_wizard.currency_id.id
                        })
                except ValueError:
                    # the referenced record does not exist (module not installed)
                    pass
        # If the floats for sale/purchase rates have been filled, create templates from them
        self._create_tax_templates_from_rates(cr, uid, obj_wizard, company_id, context=context)
        # Install all the templates objects and generate the real objects
        acc_template_ref, taxes_ref, tax_code_ref = self._install_template(cr, uid, obj_wizard.chart_template_id.id, company_id, code_digits=obj_wizard.code_digits, obj_wizard=obj_wizard, context=context)
        # write values of default taxes for product as super user
        # (regular users may not have write access on ir.values defaults)
        if obj_wizard.sale_tax and taxes_ref:
            ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.product', "taxes_id", [taxes_ref[obj_wizard.sale_tax.id]], for_all_users=True, company_id=company_id)
        if obj_wizard.purchase_tax and taxes_ref:
            ir_values_obj.set_default(cr, SUPERUSER_ID, 'product.product', "supplier_taxes_id", [taxes_ref[obj_wizard.purchase_tax.id]], for_all_users=True, company_id=company_id)
        # Create Bank journals
        self._create_bank_journals_from_o2m(cr, uid, obj_wizard, company_id, acc_template_ref, context=context)
        return {}
def _prepare_bank_journal(self, cr, uid, line, current_num, default_account_id, company_id, context=None):
'''
This function prepares the value to use for the creation of a bank journal created through the wizard of
generating COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param current_num: integer corresponding to a counter of the already created bank journals through this wizard.
:param default_account_id: id of the default debit.credit account created before for this journal.
:param company_id: id of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
obj_journal = self.pool.get('account.journal')
# we need to loop again to find next number for journal code
# because we can't rely on the value current_num as,
# its possible that we already have bank journals created (e.g. by the creation of res.partner.bank)
# and the next number for account code might have been already used before for journal
for num in xrange(current_num, 100):
# journal_code has a maximal size of 5, hence we can enforce the boundary num < 100
journal_code = _('BNK')[:3] + str(num)
ids = obj_journal.search(cr, uid, [('code', '=', journal_code), ('company_id', '=', company_id)], context=context)
if not ids:
break
else:
raise osv.except_osv(_('Error!'), _('Cannot generate an unused journal code.'))
vals = {
'name': line['acc_name'],
'code': journal_code,
'type': line['account_type'] == 'cash' and 'cash' or 'bank',
'company_id': company_id,
'analytic_journal_id': False,
'currency': False,
'default_credit_account_id': default_account_id,
'default_debit_account_id': default_account_id,
}
if line['currency_id']:
vals['currency'] = line['currency_id']
return vals
def _prepare_bank_account(self, cr, uid, line, new_code, acc_template_ref, ref_acc_bank, company_id, context=None):
'''
This function prepares the value to use for the creation of the default debit and credit accounts of a
bank journal created through the wizard of generating COA from templates.
:param line: dictionary containing the values encoded by the user related to his bank account
:param new_code: integer corresponding to the next available number to use as account code
:param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
of the accounts that have been generated from them.
:param ref_acc_bank: browse record of the account template set as root of all bank accounts for the chosen
template
:param company_id: id of the company for which the wizard is running
:return: mapping of field names and values
:rtype: dict
'''
obj_data = self.pool.get('ir.model.data')
# Get the id of the user types fr-or cash and bank
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_cash')
cash_type = tmp and tmp[1] or False
tmp = obj_data.get_object_reference(cr, uid, 'account', 'data_account_type_bank')
bank_type = tmp and tmp[1] or False
return {
'name': line['acc_name'],
'currency_id': line['currency_id'],
'code': new_code,
'type': 'liquidity',
'user_type': line['account_type'] == 'cash' and cash_type or bank_type,
'parent_id': acc_template_ref[ref_acc_bank.id] or False,
'company_id': company_id,
}
    def _create_bank_journals_from_o2m(self, cr, uid, obj_wizard, company_id, acc_template_ref, context=None):
        '''
        This function creates bank journals and its accounts for each line encoded in the field bank_accounts_id of the
        wizard.

        :param obj_wizard: the current wizard that generates the COA from the templates.
        :param company_id: the id of the company for which the wizard is running.
        :param acc_template_ref: the dictionary containing the mapping between the ids of account templates and the ids
            of the accounts that have been generated from them.
        :return: True
        :raises osv.except_osv: if bank lines exist but the chart has no bank
            root account code configured.
        '''
        obj_acc = self.pool.get('account.account')
        obj_journal = self.pool.get('account.journal')
        code_digits = obj_wizard.code_digits
        # Build a list with all the data to process
        journal_data = []
        if obj_wizard.bank_accounts_id:
            for acc in obj_wizard.bank_accounts_id:
                vals = {
                    'acc_name': acc.acc_name,
                    'account_type': acc.account_type,
                    'currency_id': acc.currency_id.id,
                }
                journal_data.append(vals)
        ref_acc_bank = obj_wizard.chart_template_id.bank_account_view_id
        if journal_data and not ref_acc_bank.code:
            raise osv.except_osv(_('Configuration Error!'), _('You have to set a code for the bank account defined on the selected chart of accounts.'))
        current_num = 1
        for line in journal_data:
            # Seek the next available number for the account code:
            # pad the bank root code with zeros and append the counter so
            # the final code has code_digits characters
            while True:
                new_code = str(ref_acc_bank.code.ljust(code_digits-len(str(current_num)), '0')) + str(current_num)
                ids = obj_acc.search(cr, uid, [('code', '=', new_code), ('company_id', '=', company_id)])
                if not ids:
                    break
                else:
                    current_num += 1
            # Create the default debit/credit accounts for this bank journal
            vals = self._prepare_bank_account(cr, uid, line, new_code, acc_template_ref, ref_acc_bank, company_id, context=context)
            default_account_id = obj_acc.create(cr, uid, vals, context=context)
            #create the bank journal
            vals_journal = self._prepare_bank_journal(cr, uid, line, current_num, default_account_id, company_id, context=context)
            obj_journal.create(cr, uid, vals_journal)
            current_num += 1
        return True
class account_bank_accounts_wizard(osv.osv_memory):
    """Transient line describing one bank/cash account to create from the
    multi-chart wizard (one record per journal to generate)."""
    _name='account.bank.accounts.wizard'
    _columns = {
        # label used for both the generated account and its journal
        'acc_name': fields.char('Account Name.', size=64, required=True),
        # back-reference to the parent wizard (o2m field bank_accounts_id)
        'bank_account_id': fields.many2one('wizard.multi.charts.accounts', 'Bank Account', required=True, ondelete='cascade'),
        'currency_id': fields.many2one('res.currency', 'Secondary Currency', help="Forces all moves for this account to have this secondary currency."),
        'account_type': fields.selection([('cash','Cash'), ('check','Check'), ('bank','Bank')], 'Account Type', size=32),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Subby4Git/tbed-parser | src/dialogueAct.py | 2 | 27480 | #!/usr/bin/env python2.5
import re
from string import *
from copy import *
from collections import *
from utils import *
from slot import *
from transformation import *
from trigger import *
class DialogueAct:
def __init__(self, cuedDA, origSentence, raspData, db, settings):
# train data values
self.cuedDA = cuedDA
self.db = db
self.settings = settings
self.origText = origSentence
self.normText = origSentence
self.normText = prepareForRASP(origSentence, self.db, False)
self.normText = separateApostrophes(self.normText)
self.raspText = self.normText
if self.settings['useDeps'] == 0:
self.lemmas = self.getLemmas(self.normText)
self.posTags = self.getPOSTags(self.normText)
else:
# separate deps and text
raspSenetence, raspDeps = raspData.strip().split('|||')
# separate text, lemmas, and POS tags, deps
self.raspText = self.getText(raspSenetence)
self.lemmas = self.getLemmas(raspSenetence)
self.posTags = self.getPOSTags(raspSenetence)
self.deps = self.getDeps(raspDeps)
# print problematic input
# both normalized text and text from RASP should have the same number
# of terms
if len(self.normText.split(' ')) != len(self.raspText.split(' ')):
print 'Warning:'
print len(self.normText.split(' ')), self.normText
print len(self.raspText.split(' ')), self.raspText
print
self.speechAct = ''
self.slots = []
self.tbedSpeechAct = ''
self.tbedSlots = []
self.valueDictCounter = defaultdict(int)
self.valueDict = {}
# in this array I store all rules applied on the DA in
# the sequencial order
self.ruleTracker = []
def __str__(self):
s = self.text+' - '
s+= self.cuedDA+' - '
s+= self.speechAct+' - '
s+= self.tbedSpeechAct+' - '
s+= str(self.slots)+' - '
s+= str(self.tbedSlots)+' - '
return s
def computeBorders(self):
'''
This method compute borders for each slot in this dialogue act.
Borders of each slot definy proximity 'atrea' where the lexical
triggers must be triggered so that SubSlot operation chould be
applied on particular slot.
FIX: Some slot might dominate the same words; as a result, they shoud have
the same borders. It is not implemented now.
'''
for each in self.tbedSlots:
if not each.lexIndex:
raise ValueError('Slot has lexical alignment set.')
each.leftMiddle = min(each.lexIndex)
each.rightMiddle = max(each.lexIndex)
self.tbedSlots.sort(cmp=lambda x, y: cmp(x.leftMiddle, y.leftMiddle))
if len(self.tbedSlots) > 0:
self.tbedSlots[0].leftBorder = 0
for i in range(1, len(self.tbedSlots)):
self.tbedSlots[i].leftBorder = self.tbedSlots[i-1].rightMiddle+1
for i in range(0, len(self.tbedSlots)-1):
self.tbedSlots[i].rightBorder = self.tbedSlots[i+1].leftMiddle-1
self.tbedSlots[-1].rightBorder = len(self.words)-1
for i in range(len(self.tbedSlots)):
if self.tbedSlots[i].leftBorder > self.tbedSlots[i].leftMiddle:
self.tbedSlots[i].leftBorder = self.tbedSlots[i].leftMiddle
if self.tbedSlots[i].rightBorder < self.tbedSlots[i].rightMiddle:
self.tbedSlots[i].rightBorder = self.tbedSlots[i].rightMiddle
## print '+'*80
## print self.text
## for each in self.tbedSlots:
## print each.renderCUED(), each.leftBorder, each.leftMiddle, each.rightMiddle, each.rightBorder
    def writeAlignment(self, f):
        """Write a human-readable visualisation of the slot/word alignment
        into the open file object *f*.

        Prints the words with their indexes, then for every slot one line of
        '-' (border area) and '=' (aligned middle span) characters under the
        words it covers, followed by the rendered slot and its border indexes.
        """
        f.write('.'*80+'\n')
        f.write(' '.join(['%s' % w for w in self.words])+'\n')
        # word indexes, padded to the width of each word
        f.write(' '.join(['%*d' % (len(w), i) for i, w in enumerate(self.words)])+'\n')
        for each in self.tbedSlots:
            # character widths of the five regions (word length + 1 separator)
            numberOfLeftDots = sum([len(w)+1 for i, w in enumerate(self.words) if i < each.leftBorder])
            numberOfLeftDashes = sum([len(w)+1 for i, w in enumerate(self.words) if i >= each.leftBorder and i<each.leftMiddle])
            numberOfEquals = sum([len(w)+1 for i, w in enumerate(self.words) if i >= each.leftMiddle and i<=each.rightMiddle])
            numberOfRightDashes = sum([len(w)+1 for i, w in enumerate(self.words) if i > each.rightMiddle and i<=each.rightBorder])
            numberOfRightDots = sum([len(w)+1 for i, w in enumerate(self.words) if i > each.rightBorder])
            f.write(' '*numberOfLeftDots)
            f.write('-'*numberOfLeftDashes)
            f.write('='*numberOfEquals)
            f.write('-'*numberOfRightDashes)
            f.write(' '*numberOfRightDots+'\n')
            # valueDictPositions only exists after replaceDBItems() has run
            if hasattr(self, 'valueDictPositions'):
                f.write('%50s (%.2d,%.2d,%.2d,%.2d) <= %s\n' % (each.renderTBED(False, self.valueDictPositions, self.words), each.leftBorder, each.leftMiddle, each.rightMiddle, each.rightBorder, str(sorted(each.lexIndex))))
        f.write('.'*80+'\n')
def getPOSTags(self, text):
t = text.split()
p = re.compile(r'.*_')
pos = [p.sub('', x) for x in t]
self.allPOSTags = set(pos)
if '.' in self.allPOSTags:
self.allPOSTags.remove('.')
return pos
def getLemmas(self, text):
t = text.split()
p = re.compile(r'(\+\w+|):\d+_\S+$')
pos = [p.sub('', x) for x in t]
return pos
def getText(self, text):
t = text.split()
p = re.compile(r':\d+_\S+$')
t = [p.sub('', x) for x in t]
pos = ' '.join(t)
return pos
    def getDeps(self, deps):
        """Process grammatical dependencies produced by RASP 2.0.

        Builds and returns a dictionary keyed by the 0-based position of the
        dependent word in ``self.words`` (and ``self.lemmas``/POS tags);
        each value is a link ``[type, [head, headPos], [dependent, depPos]]``.
        Only direct (binary) dependencies are kept, and at most one link per
        dependent position (ncsubj loses to an existing link, obj loses to an
        existing dobj).
        """
        if not deps:
            return dict()
##        print '='*80
##        print self.normText
##        print self.raspText
##        print self.lemmas
        # drop the outer parentheses and '|' markers, split into single deps
        deps = deps[1:-1].replace('|', '').split(')(')
##        print deps
        # remove POS tags
        p = re.compile(r'_[$2&a-z]+')
        deps = [p.sub('', x) for x in deps]
##        print deps
        depsDict = {}
        for d in deps:
            d = d.split()
            # keep the relation name plus the 'word:position' terms only
            d = [x for i, x in enumerate(d) if i == 0 or x.find(':') != -1]
            if len(d) != 3:
                # ignore all non direct (simple) dependencies
                continue
            # convert 'word:position' into ['word', 0-based position]
            d[1] = d[1].split(':')
            d[1][1] = int(d[1][1]) - 1
            d[2] = d[2].split(':')
            d[2][1] = int(d[2][1]) - 1
            if len(d) != 3:
                print '*'*3, d
            # build the links, from leaves to roots, index is position in the
            # sentence
            # build only a tree: do not allow silent overwriting --
            # control what is kept on collisions
            if d[2][1] in depsDict:
                # ignore ncsubj dependency, e.g.:
                # hi i need to get a flight from memphis to salt-lake-city
                # depart+ing before 10am .
                # --- ['dobj', ['to', 9], ['salt-lake-city', 10]]
                # +++ ['ncsubj', ['depart+ing', 11], ['salt-lake-city', 10]]
                #
                if d[0] == 'ncsubj':
                    continue
                # 'dobj' dependency is more interesting than 'obj'
                if depsDict[d[2][1]][0] == 'dobj' and d[0] == 'obj':
                    continue
##                print '#'*1000
##                print '-'*3, rd[d[2][1]]
##                print '+'*3, d
                depsDict[d[2][1]] = d
            else:
                # no collision: add it freely
                depsDict[d[2][1]] = d
##        print rd
        return depsDict
def parse(self):
cuedDA = self.cuedDA
numOfDAs = len(splitByComma(cuedDA))
if numOfDAs > 1:
raise ValueError('Too many DAs in input text.')
# get the speech-act
i = cuedDA.index("(")
speechAct = cuedDA[:i]
slots = cuedDA[i:].lower()
slots = slots.replace('(', '')
slots = slots.replace(')', '')
slts = []
if slots == '':
# no slots to process
slots = []
else:
# split slots
slots = splitByComma(slots)
for slt in slots:
try:
s = Slot(slt, self.settings)
s.parse()
slts.append(s)
except ValueError:
# check for invalid slot items
pass
self.speechAct = speechAct
self.slots = slts
def replaceSV(self, text, sn, sv, i):
return text[0:i]+sn+text[i+len(sv):]
def replaceDBItems(self):
    """Replace every lexical realisation of a database item in the
    normalised text by a tag of the form 'sv_slotname-N'.

    Matching slot values in self.slots are relabelled accordingly (the
    original value is preserved in slot.origValue), and the same
    substitution is mirrored into self.lemmas.

    Known limitations (kept from the original implementation):
      - if the same value occurs under different slot names, only one
        association can be made correctly;
      - ordinary words that happen to be db values get replaced too,
        e.g. 'one' -> 'sv-stars-1' in "I would like this one".
    """
    if self.settings == None:
        return
    if self.settings['DBItems'] != 'replace':
        return
    # self.db.values rows look like (slot name, slot value, value synonym,
    # c, cc) -- the last two fields are unused here.
    for (sn, sv, svs, c, cc) in self.db.values:
        i = 0
        while True:
            i = self.normText.find(svs,i)
            if i != -1:
                # Accept the match only as a whole word: require a space (or
                # text boundary) on both sides so we never replace a
                # substring of another word.
                if i > 0 and self.normText[i-1] != ' ':
                    i += 1
                    continue
                if i < len(self.normText)-len(svs) and self.normText[i+len(svs)] != ' ':
                    i += 1
                    continue
                # Found the slot-value synonym in the sentence: substitute a
                # unique tag 'sv_<name>-<global occurrence counter>'.
                newSV1 = 'sv_'+sn
                self.valueDictCounter[newSV1] += 1
                newSV2 = newSV1+'-'+str(self.valueDictCounter[newSV1])
                self.valueDict[newSV2] = (sv, svs)
                self.normText = self.replaceSV(self.normText, newSV2, svs, i)
                # Relabel the first slot whose value matches this db entry.
                # NOTE(review): the slot *name* is not checked here, so a
                # wrong slot-value label can be chosen; per the original
                # author the learning stage has to recover from that.
                for slt in self.slots:
                    if slt.value in self.db[sn][sv]:
                        slt.origValue = slt.value
                        slt.value = newSV2
                        break
            else:
                break
    self.words = split(self.normText)
    # Build position -> (slot value, synonym) mapping for the tags that
    # ended up in the sentence.
    self.valueDictPositions = {}
    for i, w in enumerate(self.words):
        if w.startswith('sv_'):
            self.valueDictPositions[i] = self.valueDict[w]
    self.words = split(self.normText)
    # Re-label the suffix: strip the trailing '-N' (assumes a single-digit
    # global counter -- TODO confirm) and replace it with the per-sentence
    # occurrence index modulo 2.
    sv_count = {}
    sv_map = {}
    for p in sorted(self.valueDictPositions):
        sv_count[self.words[p][:-2]] = 0
    for p in sorted(self.valueDictPositions):
        sv_map[self.words[p]] = self.words[p][:-2]+'-'+str(sv_count[self.words[p][:-2]] % 2)
        sv_count[self.words[p][:-2]] += 1
    # update self.words with the re-labelled tags
    for i, w in enumerate(self.words):
        if w.startswith('sv_'):
            self.words[i] = sv_map[w]
    # ... and rebuild the normalised text from them
    self.normText = ' '.join(self.words)
    # update slot values to the new labels as well
    for slt in self.slots:
        if slt.value in sv_map:
            slt.value = sv_map[slt.value]
    # mirror the DB-item tags into the lemma stream
    try:
        for i in range(len(self.lemmas)):
            if self.words[i].startswith('sv_'):
                self.lemmas[i] = self.words[i]
    except IndexError, ei:
        # word/lemma streams out of sync -- dump context before re-raising
        print self.origText
        print self.normText
        print self.raspText
        print self.words
        print self.lemmas
        raise ei
def genGrams(self, gramIDF):
    """Generate the feature grams for this utterance.

    Fills self.grams (gram tuple -> set of (start, end) word positions)
    with:
      * plain n-grams up to self.settings['nGrams'] (max 4),
      * skipping ("star") bigrams up to self.settings['nStarGrams'],
        where '*N' marks N skipped words,
      * nearest-left-POS lemma triples, if 'useDeps' and
        'nearestLeftPOSWord' are enabled,
      * nearest-dependency-tree-POS lemma triples, if 'useDeps' and
        'nearestDepTreePOSWord' are enabled.

    Grams containing a full stop are discarded, and every surviving gram
    increments its document frequency in *gramIDF*.
    """
    if not hasattr(self, 'settings'):
        return
    # make sure the optional switch exists so the tests below cannot raise
    # (equivalent to the original try/except KeyError probe)
    self.settings.setdefault('nearestDepTreePOSWord', 0)
    # FIX: the original tested hasattr(self, 'word') (singular), which is
    # never set anywhere -- the attribute created by replaceDBItems() is
    # 'words', so the text was always re-split.
    if not hasattr(self, 'words'):
        # replaceDBItems() was not run; split the normalised text here
        self.words = split(self.normText)
    self.grams = defaultdict(set)
    # regular unigrams, bigrams, trigrams, quadgrams from the word sequence
    for i in range(len(self.words)):
        self.grams[(self.words[i],)].add((i, i))
    if self.settings['nGrams'] >= 2:
        for i in range(1, len(self.words)):
            self.grams[(self.words[i-1], self.words[i])].add((i-1, i))
    if self.settings['nGrams'] >= 3:
        for i in range(2, len(self.words)):
            self.grams[(self.words[i-2], self.words[i-1], self.words[i])].add((i-2, i))
    if self.settings['nGrams'] >= 4:
        for i in range(3, len(self.words)):
            self.grams[(self.words[i-3], self.words[i-2], self.words[i-1], self.words[i])].add((i-3, i))
    # skipping (star) bigrams: '*N' stands for N skipped words
    if self.settings['nStarGrams'] >= 3:
        for i in range(2, len(self.words)):
            self.grams[(self.words[i-2], '*1', self.words[i])].add((i-2, i))
    if self.settings['nStarGrams'] >= 4:
        for i in range(3, len(self.words)):
            self.grams[(self.words[i-3], '*2', self.words[i])].add((i-3, i))
    if self.settings['nStarGrams'] >= 5:
        for i in range(4, len(self.words)):
            self.grams[(self.words[i-4], '*3', self.words[i])].add((i-4, i))
    # nearest left POS-tag lemma triples; long-range dependencies are
    # generated only for slot values ('sv_' words) -- the learning
    # algorithm later selects the useful POS triggers
    if self.settings['useDeps'] and self.settings['nearestLeftPOSWord']:
        for i, w in enumerate(self.words):
            if w.startswith('sv_'):
                for pos in self.allPOSTags:
                    # scan leftwards for the nearest word with this POS tag
                    for j in range(i-1, -1, -1):
                        if self.posTags[j] == pos:
                            # only non-slot-value lemmas are useful triggers
                            if not self.lemmas[j].startswith('sv_'):
                                self.grams[(self.lemmas[j], '*nl-'+pos, w)].add((i, i))
                            # stop after the nearest occurrence of this POS
                            break
    # nearest dependency-tree POS-tag lemma triples (slot values only)
    if self.settings['useDeps'] and self.settings['nearestDepTreePOSWord']:
        for i, w in enumerate(self.words):
            if w.startswith('sv_'):
                for pos in self.allPOSTags:
                    j = self.nearestDepTreePosWord(i, pos)
                    if j != -1:
                        self.grams[(self.lemmas[j], '*nt-'+pos, w)].add((i, i))
    # drop every gram containing a full stop -- they carry no information
    # (list(...) so deletion is safe while iterating; the original iterated
    # grams.keys() directly, which breaks on Python 3)
    for g in list(self.grams.keys()):
        if '.' in g:
            del self.grams[g]
    # update document frequencies for the IDF computation
    for g in self.grams:
        gramIDF[g] += 1.0
def nearestDepTreePosWord(self, i, pos, r = 0):
    """Climb the dependency tree from word position *i* and return the
    position of the nearest ancestor whose POS tag equals *pos*.

    Returns -1 when no such ancestor exists, when the ancestor is itself
    a slot value ('sv_' lemma), or after 10 recursion steps.
    """
    depth = r + 1
    if depth > 10:
        # recursion guard against cyclic/deep dependency chains
        return -1
    try:
        parent = self.deps[i][1][1]
    except KeyError:
        # no dependency link recorded for this position
        return -1
    if self.posTags[parent] != pos:
        # keep climbing the tree
        return self.nearestDepTreePosWord(parent, pos, depth)
    if self.lemmas[parent].startswith('sv_'):
        # only non-slot-value words are interesting
        return -1
    return parent
def removeGrams(self, remGrams):
    """Prune every gram listed in *remGrams* from self.grams.

    Only singleton grams across all dialogue acts should be passed in.
    Returns the grams that were actually deleted; once deleted, a gram
    need not be tested again because it was a singleton across all DAs.
    """
    removed = []
    for gram in remGrams:
        if gram in self.grams:
            del self.grams[gram]
            removed.append(gram)
    return removed
def renderCUED(self, origSV=False):
    """Render this DA back into CUED text form ``act(slot,slot,...)``.

    *origSV* is forwarded to each slot's renderCUED().
    """
    parts = [each_slot.renderCUED(origSV) for each_slot in self.slots]
    return '%s(%s)' % (self.speechAct, ','.join(parts))
def renderTBED(self, origSV = True):
    """Render the TBED hypothesis as ``act(slot,slot,...)``.

    Consecutive duplicate slot renderings are collapsed: the parser
    output may never contain two identical slots next to each other
    (slts[i] != slts[i+1]).
    """
    # pass the position map only when replaceDBItems() produced one
    positions = getattr(self, 'valueDictPositions', None)
    rendered = [slot.renderTBED(origSV, positions, self.words)
                for slot in self.tbedSlots]
    deduped = []
    for item in rendered:
        if not deduped or item != deduped[-1]:
            deduped.append(item)
    return self.tbedSpeechAct + '(' + ','.join(deduped) + ')'
def renderText(self):
    """Return the original (un-normalised) input text of this utterance."""
    return self.origText
def getNumOfSlots(self):
    """Return the number of slots in the reference dialogue act."""
    return len(self.slots)
def measure(self):
    """Return similarity counts between hypothesis and reference as the
    tuple (Ha, Na, Hi, Ri, Ni):

    Ha/Na -- speech-act hits out of 1,
    Hi    -- slot hits (intersection of hypothesis and reference slots),
    Ri    -- number of hypothesis slots,
    Ni    -- number of reference slots.
    """
    act_hit = 1 if self.tbedSpeechAct == self.speechAct else 0
    slot_hits = len(self.tbedSlots & self.slots)
    return (act_hit, 1, slot_hits, len(self.tbedSlots), len(self.slots))
def incorrectTbed(self):
    """Return True when the TBED hypothesis differs from the reference DA
    in either the speech act or the slot set."""
    same = (self.speechAct == self.tbedSpeechAct and
            self.slots == self.tbedSlots)
    return not same
def getErrors(self, dats, missingSlots, extraSlots, substitutedSlots):
    """Record this DA's recognition errors into the caller's accumulators.

    dats             -- DAs whose speech act was misrecognised, by act
    missingSlots     -- DAs per missing (reference-only) slot
    extraSlots       -- DAs per spurious (hypothesis-only) slot
    substitutedSlots -- DAs per (missing, extra) pair sharing a value
    """
    if self.speechAct != self.tbedSpeechAct:
        dats[self.speechAct].append(self)
    # cache both error lists; the getters are pure (see their definitions)
    missing = self.getMissingSlotItems()
    extra = self.getExtraSlotItems()
    for item in missing:
        missingSlots[item].append(self)
    for item in extra:
        extraSlots[item].append(self)
    # a missing/extra pair with the same value counts as a substitution
    for m in missing:
        for e in extra:
            if m.value == e.value:
                substitutedSlots[m][e].append(self)
    return
def getMissingSlotItems(self):
    """Return reference slots absent from the TBED hypothesis
    (false negatives), preserving their order."""
    return [slot for slot in self.slots if slot not in self.tbedSlots]
def getExtraSlotItems(self):
    """Return hypothesis slots absent from the reference DA
    (false positives), preserving their order."""
    return [slot for slot in self.tbedSlots if slot not in self.slots]
def genTrans(self):
    """Return the set of all possible Transformations that would move the
    current TBED hypothesis towards the reference DA.

    A dict counts how many times each transformation could potentially be
    applied to this DA (usually 1, but it may differ for some DAs); the
    count is accumulated into the transformation's occurrence counter so
    rules can later be sorted more precisely.  Only one modification at a
    time is allowed -- transformations are not combined.
    """
    trans = defaultdict(int)
    # speech act: propose a change only when the hypothesis act is wrong
    if self.speechAct != self.tbedSpeechAct:
        trans[Transformation(speechAct=self.speechAct)] += 1
    # slot & value: propose additions for missing slots and deletions for
    # spurious ones (self.tbedSlots is the list being improved)
    missingSlotItems = self.getMissingSlotItems()
    extraSlotItems = self.getExtraSlotItems()
    for slot in missingSlotItems:
        trans[Transformation(addSlot=slot)] += 1
    for slot in extraSlotItems:
        trans[Transformation(delSlot=slot)] += 1
    for extraSlot in extraSlotItems:
        for missingSlot in missingSlotItems:
            # allow substituting the equal sign (name/value wildcarded)
            if extraSlot.equal != missingSlot.equal:
                es = deepcopy(extraSlot)
                ms = deepcopy(missingSlot)
                es.name = None
                es.value = None
                ms.name = None
                ms.value = None
                trans[Transformation(subSlot=(es, ms, 'left'))] += 1
            # allow substituting the name (equal/value wildcarded)
            if extraSlot.name != missingSlot.name:
                es = deepcopy(extraSlot)
                ms = deepcopy(missingSlot)
                es.equal = None
                es.value = None
                ms.equal = None
                ms.value = None
                trans[Transformation(subSlot=(es, ms, 'left'))] += 1
    # accumulate the potential-usefulness counts into the transformations
    for t in trans:
        t.occurence += trans[t]
    return set(trans.keys())
def genTriggers(self):
    """Collect all possible trigger contexts for this utterance.

    A trigger combines four dimensions -- (speech act, gram, slots,
    has-slots) -- where a gram is always present and the other dimensions
    may be None, meaning "don't care".  Which dimensions are explored is
    controlled by self.settings.  Returns the set of Trigger objects for
    every combination.
    """
    # speech-act condition (None = don't care)
    saCond = [None,]
    if self.settings['speechAct'] >=1:
        saCond.append(self.tbedSpeechAct)
    # per-slot conditions, one singleton list per hypothesis slot
    slotsCond = [None,]
    if self.settings['nSlots'] >= 1:
        for slot in self.tbedSlots:
            slotsCond.append([deepcopy(slot),])
    # sentence hasSlot trigger: None = don't care, 1 = no slots, 2 = slots
    hasSlotsCond = [None,]
    if self.settings['hasSlots'] >= 1:
        if len(self.tbedSlots) == 0:
            hasSlotsCond.append(1) # False
        else:
            hasSlotsCond.append(2) # True
    # explode every combination of the four dimensions into a Trigger
    triggers = set()
    for sa in saCond:
        for gram in self.grams:
            for slot in slotsCond:
                for hasSlots in hasSlotsCond:
                    triggers.add(
                        Trigger(speechAct=sa,
                                gram=gram,
                                slots=slot,
                                hasSlots=hasSlots))
    return triggers
| gpl-2.0 |
fourwood/OutflowCone | example/cone_example.py | 1 | 7656 | #!/usr/bin/env python3
# Should be all Python3 compatible, and I'd suggest keeping it that way.
# However, it should *also* be all Python2.7 compatible, as well.
from __future__ import print_function, division
import numpy as np
import OutflowCone as oc
# How many clouds to generate in the cone model?
# 1e4 gives a *really, really* rough result, but probably okay for seeing things.
# 1e5 is pretty fine, but still sees slight variation with different random seeds.
# 1e6 is pretty smooth. Takes a bit longer, though, of course.
n_points = 1e5
inc = 45
PA = 90
theta = 60
r_in = 0.5
r_out = 6.0
# Makes sure the bottom of the cone is at the midplane:
zero_z = True
# Makes the bottom of the cone flat, rather than a sector of a spherical surface:
flatten = True
vel_vec = np.array([30., 0, 225.])
# Rotation curve paramters
C, p = (0.5, 1.35)
accel = vel_vec[0]
# The code implicitly assumes the line-of-sight is directed along -z. A.K.A. anything
# moving _into_ the LOS is moving in the +z direction. So this next variable is just
# a reminder, and you probably shouldn't try to change this without going into
# the model code.
#LOS = np.array([0, 0, -1])
## LOS grid
# ~plate scale in kpc/pix
step = 0.1
# Xs, aka RA
x_min, x_max = (-1, 1)
# This will represent the lower bin edges, essentially.
sky_xs = np.arange(0, x_max-x_min, step)
nxs = len(sky_xs)
sky_xs -= step * (nxs//2)
# Ys, aka dec
y_min, y_max = (-1, 1)
# This will represent the lower bin edges, essentially.
sky_ys = np.arange(0, y_max-y_min, step)
nys = len(sky_ys)
sky_ys -= step * (nys//2)
cone = oc.Cone(inc=inc, PA=PA, theta=theta, r_in=r_in, r_out=r_out)
# 'flatten' means make it so the cone is flat on the bottom at a disk height of r_in;
# otherwise it'll be a spherical surface of radius r_in. If zero_z is true, the minimum
# z-height of the cone will be 0, otherwise it'll be some non-zero value based on r_in.
cone.GenerateClouds(n_points, flatten=flatten, zero_z=zero_z)
# Make a rotation curve for each point.
sph_vels = np.repeat(np.asarray([vel_vec]), cone._n_clouds, axis=0)
R_max0 = r_in * np.sin(np.radians(theta))
R_maxs = R_max0 + cone._local_zs * np.tan(np.radians(theta))
Rs = np.sqrt(cone._local_xs**2+cone._local_ys**2)
R0s = R_max0 * Rs / R_maxs
rot_vels = oc.RotCurve(vel_vec[2], R0s, C=C, p=p)
rot_vels *= R0s / cone._local_rs #Ang. mom. cons.
sph_vels[:,2] = rot_vels
R_0 = 1. # kpc normalization for acceleration
sph_vels[:,0] += 1.0 * accel * (cone._local_rs / R_0)
# This function will convert the spherical-coordinate velocities
# into Cartesian, accounting for inclination.
cone.SetLocalVels(sph_vels)
## LOS is ~hard-coded, so these are equivalent.
#proj_vels = -cone._vzs
proj_vels = cone.LOS_vs
# Now we're gonna bin up velocities in the LOS grid and do some convolutions.
max_vels = np.zeros((nys, nxs))
fwhms = np.zeros_like(max_vels)
radii = np.sqrt(cone._xs**2 + cone._ys**2 + cone._zs**2)
weights = 1/radii**2 # 1/r^2 weighting approximates photoionization
inst_res = 17.0
turb_sigma = 90.0
total_broadening = np.sqrt(inst_res**2 + turb_sigma**2)
for j, y in enumerate(sky_ys):
for i, x in enumerate(sky_xs):
masked_vels = cone.GetLOSCloudVels((x, y), step)
mask = ~masked_vels.mask # foo is a masked array, we want what *isn't* masked, really.
bin_vels = masked_vels[mask]
bin_weights = weights[mask]
bin_radii = radii[mask]
if len(bin_vels) > 0:
v_min, v_max = (-500., 500.) # Enough to encompass everything...
bin_size = 5. # but probably shouldn't be hard-coded.
hist_bins = np.linspace(v_min, v_max, (v_max-v_min)/bin_size+1)
n, edges = np.histogram(bin_vels, bins=hist_bins, weights=bin_weights)
# Convolve the velocity profile with turb/inst broadening
gauss_bins = np.linspace(10*v_min,
10*v_max,
(10*v_max-10*v_min)/bin_size+1)
gauss = np.exp(-gauss_bins**2/(2*total_broadening**2)) / \
(total_broadening*np.sqrt(2*np.pi)) * bin_size
convol = np.convolve(n, gauss, mode='same')
## Estimate the FWHM.
#g_max_idx = np.where(convol == np.max(convol))[0][0]
#g_max = convol[g_max_idx]
#g_hmax = g_max / 2.
#left_hmax_idx = np.abs(convol[:g_max_idx]-g_hmax).argmin()
#right_hmax_idx = np.abs(convol[g_max_idx:]-g_hmax).argmin() + g_max_idx
#fwhm_est = gauss_bins[right_hmax_idx] - gauss_bins[left_hmax_idx]
#fwhms[j,i] = fwhm_est
max_vels[j,i] = np.average(gauss_bins, weights=convol)
else:
max_vels[j,i] = np.nan
## Print some stats:
#print("Outflow velocity stats:")
#print("Minimum:\t\t{:0.1f}".format(np.min(sph_vels[:,0])))
#print("Maximum:\t\t{:0.1f}".format(np.max(sph_vels[:,0])))
#print("Median:\t\t\t{:0.1f}".format(np.median(sph_vels[:,0])))
#print("Average:\t\t{:0.1f}".format(np.average(sph_vels[:,0])))
#print("1/r^2 weighted-average:\t{:0.1f}".format(np.average(sph_vels[:,0],
# weights=1/radii**2)))
###############
## Plotting! ##
###############
import pylab as plt
import matplotlib as mpl
import matplotlib.ticker as ticker
cmap = plt.cm.RdBu_r
cmap_vals = cmap(np.arange(256))
scale = 0.75
new_cmap_vals = np.array([[scale*val[0],
scale*val[1],
scale*val[2],
scale*val[3]] for val in cmap_vals])
new_cmap = mpl.colors.LinearSegmentedColormap.from_list("new_cmap", new_cmap_vals)
params = {'figure.subplot.left': 0.10,
'figure.subplot.right': 0.93,
'figure.subplot.bottom': 0.12,
'figure.subplot.top': 0.97,
'text.usetex': True,
'font.size': 10}
plt.rcParams.update(params)
figsize = (3.35, 2.75)
fig, ax = plt.subplots(figsize=figsize)
## LOS grid
#for y in sky_ys:
# for x in sky_xs:
# plt.plot([x, x], [y_min, y_max],
# 'k-', linewidth=0.1, zorder=0.1)
# plt.plot([x_min, x_max], [y, y],
# 'k-', linewidth=0.1, zorder=0.1)
v_min, v_max = (-150, 150)
# Main image
n_levels = 41
levels = np.linspace(v_min, v_max, n_levels)
extents = (sky_xs[0], sky_xs[-1]+step,
sky_ys[0], sky_ys[-1]+step)
img = ax.contourf(max_vels, cmap=cmap, vmin=v_min, vmax=v_max,
origin='lower', zorder=0, levels=levels, extend='both',
extent=extents)
# Contour overlay
c_levels = 11
c_levels = np.linspace(v_min, v_max, c_levels)
contours = ax.contour(max_vels, cmap=new_cmap, vmin=v_min, vmax=v_max,
origin='lower', levels=c_levels, extend='both',
extent=extents)
# Crosshairs
ax.plot([x_min, x_max], [0, 0], 'k:', scalex=False, scaley=False)
ax.plot([0, 0], [y_min, y_max], 'k:', scalex=False, scaley=False)
# Colorbar
ticks = np.arange(v_min, v_max+1, 50)
ticks = c_levels
bar = plt.colorbar(img, ticks=ticks, pad=0.025)
bar.set_label(r'Velocity (km s$^{-1}$)', labelpad=3.0)
plt.xlabel('Distance (kpc)', labelpad=0.)
plt.ylabel('Distance (kpc)', labelpad=-5.)
x_maj_ticks = ticker.MaxNLocator(2)
x_min_ticks = ticker.AutoMinorLocator(10)
y_maj_ticks = ticker.MaxNLocator(2)
y_min_ticks = ticker.AutoMinorLocator(10)
ax.xaxis.set_major_locator(x_maj_ticks)
ax.xaxis.set_minor_locator(x_min_ticks)
ax.yaxis.set_major_locator(y_maj_ticks)
ax.yaxis.set_minor_locator(y_min_ticks)
ax.set_xlim(x_min, x_max)
ax.set_ylim(y_min, y_max)
plt.savefig("cone_example.eps")
plt.close()
#plt.show()
| mit |
AloneRoad/Inforlearn | django/contrib/gis/db/models/aggregates.py | 10 | 1098 | from django.db.models import Aggregate
from django.contrib.gis.db.backend import SpatialBackend
from django.contrib.gis.db.models.sql import GeomField
class GeoAggregate(Aggregate):
    """Base class for the spatial aggregates below (Collect, Extent,
    MakeLine, Union)."""

    def add_to_query(self, query, alias, col, source, is_summary):
        """Register this aggregate on *query*, adding the geometry
        conversion/selection hooks when the source field is geographic."""
        is_geo_field = hasattr(source, 'geom_type')
        if is_geo_field:
            # Spatial aggregates need the backend-specific implementation,
            # looked up by name on the query's aggregates module.
            backend_aggregate = getattr(query.aggregates_module, self.name)
            if backend_aggregate.conversion_class is GeomField:
                # Attach a geometry conversion instance and, when the
                # backend requires it (e.g. Oracle), SQL wrapping for the
                # selected column.
                query.extra_select_fields[alias] = GeomField()
                if SpatialBackend.select:
                    query.custom_select[alias] = SpatialBackend.select
        super(GeoAggregate, self).add_to_query(query, alias, col, source, is_summary)
class Collect(GeoAggregate):
    """Spatial aggregate 'Collect'; the SQL implementation is resolved by
    this name on the backend's aggregates module (see GeoAggregate)."""
    name = 'Collect'
class Extent(GeoAggregate):
    """Spatial aggregate 'Extent'; the SQL implementation is resolved by
    this name on the backend's aggregates module (see GeoAggregate)."""
    name = 'Extent'
class MakeLine(GeoAggregate):
    """Spatial aggregate 'MakeLine'; the SQL implementation is resolved by
    this name on the backend's aggregates module (see GeoAggregate)."""
    name = 'MakeLine'
class Union(GeoAggregate):
    """Spatial aggregate 'Union'; the SQL implementation is resolved by
    this name on the backend's aggregates module (see GeoAggregate)."""
    name = 'Union'
| apache-2.0 |
DMLoy/ECommerceBasic | lib/python2.7/site-packages/django/utils/unittest/main.py | 219 | 9392 | """Unittest main program"""
import sys
import os
import types
from django.utils.unittest import loader, runner
try:
from django.utils.unittest.signals import installHandler
except ImportError:
installHandler = None
__unittest = True
FAILFAST = " -f, --failfast Stop on first failure\n"
CATCHBREAK = " -c, --catch Catch control-C and display results\n"
BUFFEROUTPUT = " -b, --buffer Buffer stdout and stderr during test runs\n"
USAGE_AS_MAIN = """\
Usage: %(progName)s [options] [tests]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s test_module - run tests from test_module
%(progName)s test_module.TestClass - run tests from
test_module.TestClass
%(progName)s test_module.TestClass.test_method - run specified test method
[tests] can be a list of any number of test modules, classes and test
methods.
Alternative Usage: %(progName)s discover [options]
Options:
-v, --verbose Verbose output
%(failfast)s%(catchbreak)s%(buffer)s -s directory Directory to start discovery ('.' default)
-p pattern Pattern to match test files ('test*.py' default)
-t directory Top level directory of project (default to
start directory)
For test discovery all test modules must be importable from the top
level directory of the project.
"""
USAGE_FROM_MODULE = """\
Usage: %(progName)s [options] [test] [...]
Options:
-h, --help Show this message
-v, --verbose Verbose output
-q, --quiet Minimal output
%(failfast)s%(catchbreak)s%(buffer)s
Examples:
%(progName)s - run default set of tests
%(progName)s MyTestSuite - run suite 'MyTestSuite'
%(progName)s MyTestCase.testSomething - run MyTestCase.testSomething
%(progName)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
class TestProgram(object):
    """A command-line program that runs a set of tests; this is primarily
       for making test modules conveniently executable.
    """
    USAGE = USAGE_FROM_MODULE

    # defaults for testing
    failfast = catchbreak = buffer = progName = None

    def __init__(self, module='__main__', defaultTest=None,
                 argv=None, testRunner=None,
                 testLoader=loader.defaultTestLoader, exit=True,
                 verbosity=1, failfast=None, catchbreak=None, buffer=None):
        # *module* may be a dotted name; resolve it to the actual module
        # object so tests can be loaded from it.
        # NOTE(review): ``basestring`` is Python-2-only -- this vendored
        # module predates Python 3 support.
        if isinstance(module, basestring):
            self.module = __import__(module)
            for part in module.split('.')[1:]:
                self.module = getattr(self.module, part)
        else:
            self.module = module
        if argv is None:
            argv = sys.argv
        self.exit = exit
        self.verbosity = verbosity
        self.failfast = failfast
        self.catchbreak = catchbreak
        self.buffer = buffer
        self.defaultTest = defaultTest
        self.testRunner = testRunner
        self.testLoader = testLoader
        self.progName = os.path.basename(argv[0])
        # Parsing the arguments also triggers the test run.
        self.parseArgs(argv)
        self.runTests()

    def usageExit(self, msg=None):
        """Print *msg* (if any) plus the usage text, then exit(2)."""
        if msg:
            print(msg)
        # Only advertise optional flags that are actually available; the
        # ``!= False`` comparisons keep the help visible when the value is
        # still None (i.e. not explicitly disabled by the constructor).
        usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
                 'buffer': ''}
        if self.failfast != False:
            usage['failfast'] = FAILFAST
        if self.catchbreak != False and installHandler is not None:
            usage['catchbreak'] = CATCHBREAK
        if self.buffer != False:
            usage['buffer'] = BUFFEROUTPUT
        print(self.USAGE % usage)
        sys.exit(2)

    def parseArgs(self, argv):
        """Parse command-line arguments, then build self.test via
        createTests().  ``discover`` as the first argument dispatches to
        _do_discovery() instead."""
        if len(argv) > 1 and argv[1].lower() == 'discover':
            self._do_discovery(argv[2:])
            return

        import getopt
        long_opts = ['help', 'verbose', 'quiet', 'failfast', 'catch', 'buffer']
        try:
            options, args = getopt.getopt(argv[1:], 'hHvqfcb', long_opts)
            for opt, value in options:
                if opt in ('-h','-H','--help'):
                    self.usageExit()
                if opt in ('-q','--quiet'):
                    self.verbosity = 0
                if opt in ('-v','--verbose'):
                    self.verbosity = 2
                # The flags below only take effect when not already fixed
                # by the constructor (None means "not set yet").
                if opt in ('-f','--failfast'):
                    if self.failfast is None:
                        self.failfast = True
                    # Should this raise an exception if -f is not valid?
                if opt in ('-c','--catch'):
                    if self.catchbreak is None and installHandler is not None:
                        self.catchbreak = True
                    # Should this raise an exception if -c is not valid?
                if opt in ('-b','--buffer'):
                    if self.buffer is None:
                        self.buffer = True
                    # Should this raise an exception if -b is not valid?
            if len(args) == 0 and self.defaultTest is None:
                # createTests will load tests from self.module
                self.testNames = None
            elif len(args) > 0:
                self.testNames = args
                if __name__ == '__main__':
                    # to support python -m unittest ...
                    self.module = None
            else:
                self.testNames = (self.defaultTest,)
            self.createTests()
        except getopt.error as msg:
            self.usageExit(msg)

    def createTests(self):
        """Build self.test from self.testNames, or from the whole module
        when no names were given."""
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames,
                                                           self.module)

    def _do_discovery(self, argv, Loader=loader.TestLoader):
        """Handle the ``discover`` sub-command: parse its options and load
        tests via Loader().discover()."""
        # handle command line args for test discovery
        self.progName = '%s discover' % self.progName
        import optparse
        parser = optparse.OptionParser()
        parser.prog = self.progName
        parser.add_option('-v', '--verbose', dest='verbose', default=False,
                          help='Verbose output', action='store_true')
        if self.failfast != False:
            parser.add_option('-f', '--failfast', dest='failfast', default=False,
                              help='Stop on first fail or error',
                              action='store_true')
        if self.catchbreak != False and installHandler is not None:
            parser.add_option('-c', '--catch', dest='catchbreak', default=False,
                              help='Catch ctrl-C and display results so far',
                              action='store_true')
        if self.buffer != False:
            parser.add_option('-b', '--buffer', dest='buffer', default=False,
                              help='Buffer stdout and stderr during tests',
                              action='store_true')
        parser.add_option('-s', '--start-directory', dest='start', default='.',
                          help="Directory to start discovery ('.' default)")
        parser.add_option('-p', '--pattern', dest='pattern', default='test*.py',
                          help="Pattern to match tests ('test*.py' default)")
        parser.add_option('-t', '--top-level-directory', dest='top', default=None,
                          help='Top level directory of project (defaults to start directory)')

        options, args = parser.parse_args(argv)
        if len(args) > 3:
            self.usageExit()

        # Up to three positional args may override -s/-p/-t respectively.
        for name, value in zip(('start', 'pattern', 'top'), args):
            setattr(options, name, value)

        # only set options from the parsing here
        # if they weren't set explicitly in the constructor
        if self.failfast is None:
            self.failfast = options.failfast
        if self.catchbreak is None and installHandler is not None:
            self.catchbreak = options.catchbreak
        if self.buffer is None:
            self.buffer = options.buffer

        if options.verbose:
            self.verbosity = 2

        start_dir = options.start
        pattern = options.pattern
        top_level_dir = options.top

        loader = Loader()
        self.test = loader.discover(start_dir, pattern, top_level_dir)

    def runTests(self):
        """Instantiate the runner (if a class was given), run self.test and
        optionally sys.exit() with the result status."""
        if self.catchbreak:
            installHandler()
        if self.testRunner is None:
            self.testRunner = runner.TextTestRunner
        # NOTE(review): ``types.ClassType`` (old-style classes) is a
        # Python-2 construct.
        if isinstance(self.testRunner, (type, types.ClassType)):
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer)
            except TypeError:
                # didn't accept the verbosity, buffer or failfast arguments
                testRunner = self.testRunner()
        else:
            # it is assumed to be a TestRunner instance
            testRunner = self.testRunner
        self.result = testRunner.run(self.test)
        if self.exit:
            sys.exit(not self.result.wasSuccessful())
# Alias kept for the conventional ``unittest.main(...)`` invocation style.
main = TestProgram

def main_():
    """Program entry point: switch the usage text to the command-line
    variant and run without a default module."""
    TestProgram.USAGE = USAGE_AS_MAIN
    main(module=None)
| mit |
alzeih/ava | ava_core/notify/migrations/0001_initial.py | 2 | 1389 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-06 02:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration for the notify app.

    Creates the ``NotificationEmail`` table: one templated e-mail
    (sender address, subject, body) per notification type.
    """

    initial = True  # first migration in this app's history
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='NotificationEmail',
            fields=[
                # Surrogate primary key plus auto-maintained timestamps.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=100, verbose_name='Name')),
                ('description', models.TextField(max_length=500, verbose_name='Description')),
                # One template per notification type, enforced by unique=True.
                ('notification_type', models.IntegerField(choices=[(0, 'Introducing AVA'), (1, 'Invitation to join AVA'), (2, 'Update to your profile'), (3, 'Achievement Unlocked'), (4, 'Test Completed - Success!'), (5, 'Test Completed - Failure')], default=0, unique=True, verbose_name='Notification Type')),
                ('address_from', models.EmailField(max_length=254)),
                ('subject', models.CharField(max_length=128)),
                ('body', models.TextField(max_length=1000)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| gpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-61/modules/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table , from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ration = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher that RDR
# Threshold and table size consumed by the GB2312 distribution analyser;
# their derivation is given in the frequency-table comment block above.
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| gpl-3.0 |
erkrishna9/odoo | addons/l10n_th/__openerp__.py | 170 | 1533 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest for the Thai accounting localization.
{
    'name': 'Thailand - Accounting',
    'version': '1.0',
    'category': 'Localization/Account Charts',
    'description': """
Chart of Accounts for Thailand.
===============================
Thai accounting chart and localization.
""",
    'author': 'Almacom',
    'website': 'http://almacom.co.th/',
    # Builds on the generic chart-of-accounts framework module.
    'depends': ['account_chart'],
    # Data files loaded at install time.
    'data': [ 'account_data.xml' ],
    'installable': True,
    'images': ['images/config_chart_l10n_th.jpeg','images/l10n_th_chart.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
aalmah/fuel | tests/test_schemes.py | 21 | 7676 | import numpy
from numpy.testing import assert_raises
from fuel.schemes import (ConstantScheme, SequentialExampleScheme,
SequentialScheme, ShuffledExampleScheme,
ShuffledScheme, ConcatenatedScheme,
cross_validation)
def iterator_requester(scheme):
    """Return a factory that builds a ``scheme`` instance and immediately
    hands back that instance's request iterator.
    """
    def get_request_iterator(*args, **kwargs):
        return scheme(*args, **kwargs).get_request_iterator()
    return get_request_iterator
def test_constant_scheme():
    """ConstantScheme yields fixed-size batch requests, optionally capped
    by ``num_examples`` or ``times``."""
    get_request_iterator = iterator_requester(ConstantScheme)
    # num_examples caps the total number of examples; last batch may be short.
    assert list(get_request_iterator(3, num_examples=7)) == [3, 3, 1]
    assert list(get_request_iterator(3, num_examples=9)) == [3, 3, 3]
    assert list(get_request_iterator(3, num_examples=2)) == [2]
    # times caps the number of batches instead.
    assert list(get_request_iterator(2, times=3)) == [2, 2, 2]
    assert list(get_request_iterator(3, times=1)) == [3]
    # Without a cap the iterator is endless.
    it = get_request_iterator(3)
    # BUG FIX: the original `assert [next(it) == 3 for _ in range(10)]`
    # only asserted that a non-empty list is truthy (always passes);
    # check every element instead.
    assert all(next(it) == 3 for _ in range(10))
    # num_examples and times may not be combined.
    assert_raises(ValueError, get_request_iterator, 10, 2, 2)
    assert not ConstantScheme(3, 3).requests_examples
def test_sequential_scheme():
    """SequentialScheme batches indices in order; the final batch may be
    smaller than ``batch_size``."""
    request_iter = iterator_requester(SequentialScheme)
    assert list(request_iter(5, 3)) == [[0, 1, 2], [3, 4]]
    assert list(request_iter(4, 2)) == [[0, 1], [2, 3]]
    # An explicit index list is batched in the order given.
    assert list(request_iter([4, 3, 2, 1, 0], 3)) == [[4, 3, 2], [1, 0]]
    assert list(request_iter([3, 2, 1, 0], 2)) == [[3, 2], [1, 0]]
    assert not SequentialScheme(3, 3).requests_examples
def test_shuffled_scheme_sorted_indices():
    # Shuffle 0..6 with an identically-seeded RNG to predict the scheme's
    # permutation, then compare batch-by-batch with each batch sorted.
    get_request_iterator = iterator_requester(ShuffledScheme)
    indices = list(range(7))
    rng = numpy.random.RandomState(3)
    test_rng = numpy.random.RandomState(3)
    test_rng.shuffle(indices)
    assert list(get_request_iterator(7, 3, rng=rng, sorted_indices=True)) == \
        [sorted(indices[:3]), sorted(indices[3:6]), sorted(indices[6:])]
    # A second iteration consumes more of the same RNG stream, so the
    # resulting shuffle must differ from the first one.
    assert list(get_request_iterator(7, 3, rng=rng, sorted_indices=True)) != \
        [sorted(indices[:3]), sorted(indices[3:6]), sorted(indices[6:])]

    # Same check with an explicit (reversed) index list.
    indices = list(range(6))[::-1]
    expected = indices[:]
    rng = numpy.random.RandomState(3)
    test_rng = numpy.random.RandomState(3)
    test_rng.shuffle(expected)
    assert (list(get_request_iterator(indices, 3, rng=rng,
                                      sorted_indices=True)) ==
            [sorted(expected[:3]), sorted(expected[3:6])])


def test_shuffled_scheme_unsorted_indices():
    # As above, but with sorted_indices=False the batches must preserve
    # the shuffled order exactly.
    get_request_iterator = iterator_requester(ShuffledScheme)
    indices = list(range(7))
    rng = numpy.random.RandomState(3)
    test_rng = numpy.random.RandomState(3)
    test_rng.shuffle(indices)
    assert list(get_request_iterator(7, 3, rng=rng, sorted_indices=False)) == \
        [indices[:3], indices[3:6], indices[6:]]
    # Second pass advances the RNG stream, so the order changes.
    assert list(get_request_iterator(7, 3, rng=rng, sorted_indices=False)) != \
        [indices[:3], indices[3:6], indices[6:]]

    indices = list(range(6))[::-1]
    expected = indices[:]
    rng = numpy.random.RandomState(3)
    test_rng = numpy.random.RandomState(3)
    test_rng.shuffle(expected)
    assert (list(get_request_iterator(indices, 3, rng=rng,
                                      sorted_indices=False)) ==
            [expected[:3], expected[3:6]])


def test_shuffled_scheme_requests_batches():
    # ShuffledScheme is a batch scheme, not an example scheme.
    assert not ShuffledScheme(3, 3).requests_examples
def test_shuffled_example_scheme():
    # An example scheme yields one shuffled index at a time; reproduce the
    # permutation with an identically-seeded RNG.
    get_request_iterator = iterator_requester(ShuffledExampleScheme)
    indices = list(range(7))
    rng = numpy.random.RandomState(3)
    test_rng = numpy.random.RandomState(3)
    test_rng.shuffle(indices)
    assert list(get_request_iterator(7, rng=rng)) == indices


def test_shuffled_example_scheme_no_rng():
    # When no RNG is passed, the scheme creates one itself.
    scheme = ShuffledExampleScheme(7)
    assert scheme.rng is not None


def test_shuffled_example_scheme_requests_examples():
    assert ShuffledExampleScheme(3).requests_examples


def test_sequential_example_scheme():
    # Indices come out one at a time, in the given order.
    get_request_iterator = iterator_requester(SequentialExampleScheme)
    assert list(get_request_iterator(7)) == list(range(7))
    assert list(get_request_iterator(range(7)[::-1])) == list(range(7)[::-1])


def test_sequential_example_scheme_requests_examples():
    assert SequentialExampleScheme(3).requests_examples
def test_concatenated_scheme():
    # The concatenated iterator chains the sub-schemes' requests in order.
    sch = ConcatenatedScheme(schemes=[ConstantScheme(batch_size=10, times=5),
                                      ConstantScheme(batch_size=20, times=3),
                                      ConstantScheme(batch_size=30, times=1)])
    assert (list(sch.get_request_iterator()) ==
            ([10] * 5) + ([20] * 3) + [30])


def test_concatenated_scheme_raises_value_error_on_different_request_types():
    # Mixing a batch scheme with an example scheme is rejected up front.
    assert_raises(ValueError, ConcatenatedScheme,
                  [ConstantScheme(batch_size=10, times=5),
                   SequentialExampleScheme(examples=3)])


def test_concatenated_scheme_infers_request_type():
    # requests_examples is inherited from the (uniform) sub-schemes.
    assert not ConcatenatedScheme(
        schemes=[ConstantScheme(batch_size=10, times=5),
                 ConstantScheme(batch_size=10, times=5)]).requests_examples

    assert ConcatenatedScheme(
        schemes=[SequentialExampleScheme(examples=10),
                 SequentialExampleScheme(examples=10)]).requests_examples
def test_cross_validation():
    # test raise when strict=True: 10 examples cannot be split evenly
    # into 3 folds, so the generator raises on first use.
    cross = cross_validation(SequentialExampleScheme, 10, 3)
    assert_raises(ValueError, next, cross)

    # test IndexScheme when strict=False
    cross = cross_validation(SequentialExampleScheme, 10, 3, False)
    (train, valid, valid_size) = next(cross)
    assert list(train.get_request_iterator()) == list(range(3, 10))
    assert list(valid.get_request_iterator()) == list(range(0, 3))
    # test that indices are not depleted (a fresh iterator can be drawn)
    assert list(train.get_request_iterator()) == list(range(3, 10))
    assert list(valid.get_request_iterator()) == list(range(0, 3))
    assert valid_size == 3

    # Second fold: validation window slides to the middle.
    (train, valid, valid_size) = next(cross)
    assert (list(train.get_request_iterator()) ==
            list(range(0, 3)) + list(range(6, 10)))
    assert list(valid.get_request_iterator()) == list(range(3, 6))
    # test that indices are not depleted
    assert (list(train.get_request_iterator()) ==
            list(range(0, 3)) + list(range(6, 10)))
    assert list(valid.get_request_iterator()) == list(range(3, 6))
    assert valid_size == 3

    # Third fold: the leftover example goes to the final validation set.
    (train, valid, valid_size) = next(cross)
    assert list(train.get_request_iterator()) == list(range(0, 6))
    assert list(valid.get_request_iterator()) == list(range(6, 10))
    # test that indices are not depleted
    assert list(train.get_request_iterator()) == list(range(0, 6))
    assert list(valid.get_request_iterator()) == list(range(6, 10))
    assert valid_size == 4

    assert_raises(StopIteration, next, cross)

    # test BatchScheme: requests come out as batches of batch_size.
    cross = cross_validation(SequentialScheme, 8, 2, batch_size=2)
    (train, valid) = next(cross)
    assert list(train.get_request_iterator()) == [[4, 5], [6, 7]]
    assert list(valid.get_request_iterator()) == [[0, 1], [2, 3]]
    # test that indices are not depleted
    assert list(train.get_request_iterator()) == [[4, 5], [6, 7]]
    assert list(valid.get_request_iterator()) == [[0, 1], [2, 3]]

    (train, valid) = next(cross)
    assert list(train.get_request_iterator()) == [[0, 1], [2, 3]]
    assert list(valid.get_request_iterator()) == [[4, 5], [6, 7]]
    # test that indices are not depleted
    assert list(train.get_request_iterator()) == [[0, 1], [2, 3]]
    assert list(valid.get_request_iterator()) == [[4, 5], [6, 7]]

    assert_raises(StopIteration, next, cross)
| mit |
XperiaSTE/android_kernel_sony_u8500 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)

# Per-event lookup tables, populated through the define_* hooks below:
#   flag_fields[event][field]     -> {'delim': str, 'values': {bit: name}}
#   symbolic_fields[event][field] -> {'values': {value: name}}
flag_fields = autodict()
symbolic_fields = autodict()

def define_flag_field(event_name, field_name, delim):
    """Record the delimiter used when joining flag names for a field."""
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    """Map one flag bit of a field to its symbolic name."""
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    """Hook kept for API symmetry; the value table is created lazily."""
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    """Map one exact field value to its symbolic name."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str

def flag_str(event_name, field_name, value):
    """Decode the bitmask *value* of a flag field into a string of flag
    names joined by the field's registered delimiter.  Unknown fields or
    unmatched bits yield "".
    """
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # FIX: use sorted() rather than calling .sort() on dict.keys(),
        # which fails on Python 3 where keys() returns a view object.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            # A zero-valued flag name (e.g. "NONE") only applies when the
            # whole mask is zero.
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                value &= ~idx

    return string

def symbol_str(event_name, field_name, value):
    """Decode the exact *value* of a symbolic field into its registered
    name, or "" if the value (or field) is unknown.
    """
    string = ""

    if symbolic_fields[event_name][field_name]:
        # FIX: sorted() instead of Python-2-only keys.sort().
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
# Kernel trace flag bits, in ascending bit order.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render the trace-flags bitmask *value* as a " | "-joined list of
    flag names ("NONE" when the mask is zero)."""
    names = []

    for bit, name in trace_flags.items():
        # The zero bit ("NONE") only matches an all-zero mask.
        if not value and not bit:
            names.append(name)
            break
        if bit and (value & bit) == bit:
            names.append(name)
            value &= ~bit

    return " | ".join(names)
def taskState(state):
    """Map a scheduler task-state code to its mnemonic, or "Unknown"."""
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every traced event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp, in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Timestamp rendered as "seconds.microseconds"."""
        micros = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, micros)
| gpl-2.0 |
2013Commons/HUE-SHARK | build/env/lib/python2.7/site-packages/Django-1.2.3-py2.7.egg/django/contrib/gis/maps/google/gmap.py | 321 | 9000 | from django.conf import settings
from django.contrib.gis import geos
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class GoogleMapException(Exception):
    """Raised when a Google Map cannot be configured or rendered."""
from django.contrib.gis.maps.google.overlays import GPolygon, GPolyline, GMarker, GIcon
# The default Google Maps URL (for the API javascript)
# TODO: Internationalize for Japan, UK, etc.
# %s is filled with the requested API version; the API key is appended
# directly after the trailing "key=".
GOOGLE_MAPS_URL='http://maps.google.com/maps?file=api&v=%s&key='
class GoogleMap(object):
    """
    A class for generating Google Maps JavaScript.

    Rendering is template-driven: render() fills ``self.template`` with the
    map's zoom/center, overlays and KML sources, while the properties below
    expose the HTML/JS fragments a template author needs (``scripts``,
    ``body``, ``style``, ``xhtml``).
    """
    # String constants
    onunload = mark_safe('onunload="GUnload()"') # Cleans up after Google Maps
    vml_css = mark_safe('v\:* {behavior:url(#default#VML);}') # CSS for IE VML
    xmlns = mark_safe('xmlns:v="urn:schemas-microsoft-com:vml"') # XML Namespace (for IE VML).

    def __init__(self, key=None, api_url=None, version=None,
                 center=None, zoom=None, dom_id='map',
                 kml_urls=None, polylines=None, polygons=None, markers=None,
                 template='gis/google/google-map.js',
                 js_module='geodjango',
                 extra_context=None):
        # FIX: `kml_urls=[]` and `extra_context={}` were mutable default
        # arguments, shared between all calls that omit them.  The None
        # sentinels below are backward-compatible: omitting the arguments
        # behaves exactly as before.
        if kml_urls is None:
            kml_urls = []
        if extra_context is None:
            extra_context = {}

        # The Google Maps API Key defined in the settings will be used
        # if not passed in as a parameter. The use of an API key is
        # _required_.
        if not key:
            try:
                self.key = settings.GOOGLE_MAPS_API_KEY
            except AttributeError:
                raise GoogleMapException('Google Maps API Key not found (try adding GOOGLE_MAPS_API_KEY to your settings).')
        else:
            self.key = key

        # Getting the Google Maps API version, defaults to using the latest
        # ("2.x"), this is not necessarily the most stable.
        if not version:
            self.version = getattr(settings, 'GOOGLE_MAPS_API_VERSION', '2.x')
        else:
            self.version = version

        # Can specify the API URL in the `api_url` keyword.
        if not api_url:
            self.api_url = mark_safe(getattr(settings, 'GOOGLE_MAPS_URL', GOOGLE_MAPS_URL) % self.version)
        else:
            self.api_url = api_url

        # Setting the DOM id of the map, the load function, the JavaScript
        # template, and the KML URLs array.
        self.dom_id = dom_id
        self.extra_context = extra_context
        self.js_module = js_module
        self.template = template
        self.kml_urls = kml_urls

        # Does the user want any GMarker, GPolygon, and/or GPolyline overlays?
        overlay_info = [[GMarker, markers, 'markers'],
                        [GPolygon, polygons, 'polygons'],
                        [GPolyline, polylines, 'polylines']]

        for overlay_class, overlay_list, varname in overlay_info:
            setattr(self, varname, [])
            if overlay_list:
                for overlay in overlay_list:
                    # Accept ready-made overlay objects as well as raw
                    # geometries, which are wrapped on the fly.
                    if isinstance(overlay, overlay_class):
                        getattr(self, varname).append(overlay)
                    else:
                        getattr(self, varname).append(overlay_class(overlay))

        # If GMarker, GPolygons, and/or GPolylines are used the zoom will be
        # automatically calculated via the Google Maps API.  If both a zoom
        # level and a center coordinate are provided with polygons/polylines,
        # no automatic determination will occur.
        self.calc_zoom = False
        if self.polygons or self.polylines or self.markers:
            if center is None or zoom is None:
                self.calc_zoom = True

        # Defaults for the zoom level and center coordinates if the zoom
        # is not automatically calculated.
        if zoom is None: zoom = 4
        self.zoom = zoom
        if center is None: center = (0, 0)
        self.center = center

    def render(self):
        """
        Generates the JavaScript necessary for displaying this Google Map.
        """
        params = {'calc_zoom' : self.calc_zoom,
                  'center' : self.center,
                  'dom_id' : self.dom_id,
                  'js_module' : self.js_module,
                  'kml_urls' : self.kml_urls,
                  'zoom' : self.zoom,
                  'polygons' : self.polygons,
                  'polylines' : self.polylines,
                  'icons': self.icons,
                  'markers' : self.markers,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def body(self):
        "Returns HTML body tag for loading and unloading Google Maps javascript."
        return mark_safe('<body %s %s>' % (self.onload, self.onunload))

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        return mark_safe('onload="%s.%s_load()"' % (self.js_module, self.dom_id))

    @property
    def api_script(self):
        "Returns the <script> tag for the Google Maps API javascript."
        return mark_safe('<script src="%s%s" type="text/javascript"></script>' % (self.api_url, self.key))

    @property
    def js(self):
        "Returns only the generated Google Maps JavaScript (no <script> tags)."
        return self.render()

    @property
    def scripts(self):
        "Returns all <script></script> tags required with Google Maps JavaScript."
        return mark_safe('%s\n <script type="text/javascript">\n//<![CDATA[\n%s//]]>\n </script>' % (self.api_script, self.js))

    @property
    def style(self):
        "Returns additional CSS styling needed for Google Maps on IE."
        return mark_safe('<style type="text/css">%s</style>' % self.vml_css)

    @property
    def xhtml(self):
        "Returns XHTML information needed for IE VML overlays."
        return mark_safe('<html xmlns="http://www.w3.org/1999/xhtml" %s>' % self.xmlns)

    @property
    def icons(self):
        "Returns a sequence of GIcon objects in this map."
        return set([marker.icon for marker in self.markers if marker.icon])
class GoogleMapSet(GoogleMap):

    def __init__(self, *args, **kwargs):
        """
        A class for generating sets of Google Maps that will be shown on the
        same page together.

        Example:
         gmapset = GoogleMapSet( GoogleMap( ... ), GoogleMap( ... ) )
         gmapset = GoogleMapSet( [ gmap1, gmap2] )
        """
        # The `google-multi.js` template is used instead of `google-single.js`
        # by default.
        template = kwargs.pop('template', 'gis/google/google-multi.js')

        # This is the template used to generate the GMap load JavaScript for
        # each map in the set.
        self.map_template = kwargs.pop('map_template', 'gis/google/google-single.js')

        # Running GoogleMap.__init__(), and resetting the template
        # value with default obtained above.
        super(GoogleMapSet, self).__init__(**kwargs)
        self.template = template

        # If a tuple/list passed in as first element of args, then assume
        # it holds the maps; otherwise args itself is the sequence of maps.
        # NOTE(review): args[0] raises IndexError when no map is given —
        # the API appears to require at least one positional argument.
        if isinstance(args[0], (tuple, list)):
            self.maps = args[0]
        else:
            self.maps = args

        # Generating DOM ids for each of the maps in the set.
        # (`xrange` marks this module as Python 2 code.)
        self.dom_ids = ['map%d' % i for i in xrange(len(self.maps))]

    def load_map_js(self):
        """
        Returns JavaScript containing all of the loading routines for each
        map in this set.
        """
        result = []
        for dom_id, gmap in zip(self.dom_ids, self.maps):
            # Backup copies the GoogleMap DOM id and template attributes.
            # They are overridden on each GoogleMap instance in the set so
            # that only the loading JavaScript (and not the header variables)
            # is used with the generated DOM ids.  The overrides are
            # temporary: the originals are restored after rendering.
            tmp = (gmap.template, gmap.dom_id)
            gmap.template = self.map_template
            gmap.dom_id = dom_id
            result.append(gmap.js)
            # Restoring the backup values.
            gmap.template, gmap.dom_id = tmp
        return mark_safe(''.join(result))

    def render(self):
        """
        Generates the JavaScript for the collection of Google Maps in
        this set.
        """
        params = {'js_module' : self.js_module,
                  'dom_ids' : self.dom_ids,
                  'load_map_js' : self.load_map_js(),
                  'icons' : self.icons,
                  }
        params.update(self.extra_context)
        return render_to_string(self.template, params)

    @property
    def onload(self):
        "Returns the `onload` HTML <body> attribute."
        # Overloaded to use the `load` function defined in the
        # `google-multi.js`, which calls the load routines for
        # each one of the individual maps in the set.
        return mark_safe('onload="%s.load()"' % self.js_module)

    @property
    def icons(self):
        "Returns a sequence of all icons in each map of the set."
        icons = set()
        for map in self.maps: icons |= map.icons
        return icons
| apache-2.0 |
isrohutamahopetechnik/MissionPlanner | Lib/xml/dom/domreg.py | 63 | 3577 | """Registration facilities for DOM. This module should not be used
directly. Instead, the functions getDOMImplementation and
registerDOMImplementation should be imported from xml.dom."""
from xml.dom.minicompat import * # isinstance, StringTypes
# This is a list of well-known implementations.  Well-known names
# should be published by posting to xml-sig@python.org, and are
# subsequently recorded in this file.
well_known_implementations = {
    'minidom': 'xml.dom.minidom',
    '4DOM': 'xml.dom.DOMImplementation',
}

# DOM implementations not officially registered should register
# themselves with their own name via registerDOMImplementation().
registered = {}

def registerDOMImplementation(name, factory):
    """registerDOMImplementation(name, factory)

    Register the factory function with the name. The factory function
    should return an object which implements the DOMImplementation
    interface. The factory function can either return the same object,
    or a new one (e.g. if that implementation supports some
    customization)."""
    registered[name] = factory
def _good_enough(dom, features):
"_good_enough(dom, features) -> Return 1 if the dom offers the features"
for f,v in features:
if not dom.hasFeature(f,v):
return 0
return 1
def getDOMImplementation(name = None, features = ()):
    """getDOMImplementation(name = None, features = ()) -> DOM implementation.

    Return a suitable DOM implementation. The name is either
    well-known, the module name of a DOM implementation, or None. If
    it is not None, imports the corresponding module and returns
    DOMImplementation object if the import succeeds.

    If name is not given, consider the available implementations to
    find one with the required feature set. If no implementation can
    be found, raise an ImportError. The features list must be a sequence
    of (feature, version) pairs which are passed to hasFeature."""

    # Local import: os is only needed for the PYTHON_DOM override below.
    import os
    creator = None
    mod = well_known_implementations.get(name)
    if mod:
        # Well-known name: import its module and delegate to it.
        mod = __import__(mod, {}, {}, ['getDOMImplementation'])
        return mod.getDOMImplementation()
    elif name:
        # Otherwise the name must have been registered explicitly.
        return registered[name]()
    elif "PYTHON_DOM" in os.environ:
        # Environment override, resolved recursively by name.
        return getDOMImplementation(name = os.environ["PYTHON_DOM"])

    # User did not specify a name, try implementations in arbitrary
    # order, returning the one that has the required features
    if isinstance(features, StringTypes):
        features = _parse_feature_string(features)
    for creator in registered.values():
        dom = creator()
        if _good_enough(dom, features):
            return dom

    for creator in well_known_implementations.keys():
        try:
            dom = getDOMImplementation(name = creator)
        # NOTE(review): StandardError and the `raise E, msg` statement
        # below are Python-2-only; this module predates Python 3.
        except StandardError: # typically ImportError, or AttributeError
            continue
        if _good_enough(dom, features):
            return dom

    raise ImportError,"no suitable DOM implementation found"
def _parse_feature_string(s):
features = []
parts = s.split()
i = 0
length = len(parts)
while i < length:
feature = parts[i]
if feature[0] in "0123456789":
raise ValueError, "bad feature name: %r" % (feature,)
i = i + 1
version = None
if i < length:
v = parts[i]
if v[0] in "0123456789":
i = i + 1
version = v
features.append((feature, version))
return tuple(features)
| gpl-3.0 |
singulared/aiohttp | tests/test_web_server.py | 1 | 3093 | import asyncio
from unittest import mock
import pytest
from aiohttp import client, web
@asyncio.coroutine
def test_simple_server(raw_test_server, test_client):
    """A handler echoing the relative URL is served back verbatim."""
    @asyncio.coroutine
    def handler(request):
        return web.Response(text=str(request.rel_url))

    server = yield from raw_test_server(handler)
    cli = yield from test_client(server)
    response = yield from cli.get('/path/to')
    assert response.status == 200
    body = yield from response.text()
    assert body == '/path/to'
@asyncio.coroutine
def test_raw_server_not_http_exception(raw_test_server, test_client):
    # A non-HTTP exception from the handler becomes a 500 response and is
    # logged via logger.exception with the original exception attached.
    exc = RuntimeError("custom runtime error")

    @asyncio.coroutine
    def handler(request):
        raise exc

    logger = mock.Mock()
    server = yield from raw_test_server(handler, logger=logger)
    cli = yield from test_client(server)
    resp = yield from cli.get('/path/to')
    assert resp.status == 500

    txt = yield from resp.text()
    assert "<h1>500 Internal Server Error</h1>" in txt

    logger.exception.assert_called_with(
        "Error handling request",
        exc_info=exc)


@asyncio.coroutine
def test_raw_server_handler_timeout(raw_test_server, test_client):
    # A TimeoutError from the handler maps to 504 Gateway Timeout and is
    # logged at debug level rather than as an error.
    exc = asyncio.TimeoutError("error")

    @asyncio.coroutine
    def handler(request):
        raise exc

    logger = mock.Mock()
    server = yield from raw_test_server(handler, logger=logger)
    cli = yield from test_client(server)
    resp = yield from cli.get('/path/to')
    assert resp.status == 504

    txt = yield from resp.text()
    assert "<h1>504 Gateway Timeout</h1>" in txt

    logger.debug.assert_called_with("Request handler timed out.")
@asyncio.coroutine
def test_raw_server_do_not_swallow_exceptions(raw_test_server, test_client):
    # `exc` is rebound per loop iteration; the handler closure raises
    # whatever the current value is.
    exc = None

    @asyncio.coroutine
    def handler(request):
        raise exc

    logger = mock.Mock()
    server = yield from raw_test_server(handler, logger=logger)
    cli = yield from test_client(server)

    # CancelledError must disconnect the client and be logged as a
    # premature disconnection, not reported as a server error.
    for _exc, msg in (
            (asyncio.CancelledError("error"),
             'Ignored premature client disconnection'),):
        exc = _exc
        with pytest.raises(client.ServerDisconnectedError):
            yield from cli.get('/path/to')
        logger.debug.assert_called_with(msg)
@asyncio.coroutine
def test_raw_server_not_http_exception_debug(raw_test_server, test_client):
    # With debug=True the 500 page includes a traceback section in
    # addition to the usual exception logging.
    exc = RuntimeError("custom runtime error")

    @asyncio.coroutine
    def handler(request):
        raise exc

    logger = mock.Mock()
    server = yield from raw_test_server(handler, logger=logger, debug=True)
    cli = yield from test_client(server)
    resp = yield from cli.get('/path/to')
    assert resp.status == 500

    txt = yield from resp.text()
    assert "<h2>Traceback:</h2>" in txt

    logger.exception.assert_called_with(
        "Error handling request",
        exc_info=exc)
def test_create_web_server_with_implicit_loop(loop):
    """web.Server picks up the current event loop when none is passed."""
    asyncio.set_event_loop(loop)

    @asyncio.coroutine
    def handler(request):
        return web.Response()  # pragma: no cover

    server = web.Server(handler)
    assert server._loop is loop
| apache-2.0 |
1d4Nf6/flocker | flocker/control/httpapi.py | 5 | 39372 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
A HTTP REST API for controlling the Dataset Manager.
"""
import yaml
from uuid import uuid4, UUID
from pyrsistent import pmap, thaw
from twisted.protocols.tls import TLSMemoryBIOFactory
from twisted.python.filepath import FilePath
from twisted.web.http import (
CONFLICT, CREATED, NOT_FOUND, OK, NOT_ALLOWED as METHOD_NOT_ALLOWED,
BAD_REQUEST
)
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.application.internet import StreamServerEndpointService
from klein import Klein
from pyrsistent import discard
from ..restapi import (
EndpointResponse, structured, user_documentation, make_bad_request,
private_api
)
from . import (
Dataset, Manifestation, Application, DockerImage, Port,
AttachedVolume, Link
)
from ._config import (
ApplicationMarshaller, FLOCKER_RESTART_POLICY_NAME_TO_POLICY,
model_from_configuration, FigConfiguration, FlockerConfiguration,
ConfigurationError
)
from .. import __version__
# Default port for REST API:
REST_API_PORT = 4523


# JSON schemas shipped alongside this module; used to validate request
# and response bodies of the v1 endpoints.
SCHEMA_BASE = FilePath(__file__).parent().child(b'schema')
SCHEMAS = {
    b'/v1/types.json': yaml.safe_load(
        SCHEMA_BASE.child(b'types.yml').getContent()),
    b'/v1/endpoints.json': yaml.safe_load(
        SCHEMA_BASE.child(b'endpoints.yml').getContent()),
}

# Pre-built error responses raised by the API endpoints below.
CONTAINER_NAME_COLLISION = make_bad_request(
    code=CONFLICT, description=u"The container name already exists."
)
CONTAINER_NOT_FOUND = make_bad_request(
    code=NOT_FOUND, description=u"Container not found.")
CONTAINER_PORT_COLLISION = make_bad_request(
    code=CONFLICT, description=u"A specified external port is already in use."
)
LINK_PORT_COLLISION = make_bad_request(
    code=CONFLICT,
    description=u"The local ports in a container's links must be unique."
)
LINK_ALIAS_COLLISION = make_bad_request(
    code=CONFLICT, description=u"Link aliases must be unique."
)
DATASET_ID_COLLISION = make_bad_request(
    code=CONFLICT, description=u"The provided dataset_id is already in use.")
PRIMARY_NODE_NOT_FOUND = make_bad_request(
    description=u"The provided primary node is not part of the cluster.")
DATASET_NOT_FOUND = make_bad_request(
    code=NOT_FOUND, description=u"Dataset not found.")
DATASET_DELETED = make_bad_request(
    code=METHOD_NOT_ALLOWED, description=u"The dataset has been deleted.")
DATASET_ON_DIFFERENT_NODE = make_bad_request(
    code=CONFLICT, description=u"The dataset is on another node.")
DATASET_IN_USE = make_bad_request(
    code=CONFLICT,
    description=u"The dataset is being used by another container.")

# Sentinel distinguishing "maximum_size not supplied" from an explicit
# None value.
_UNDEFINED_MAXIMUM_SIZE = object()
class ConfigurationAPIUserV1(object):
"""
A user accessing the API.
The APIs exposed here typically operate on cluster configuration. They
frequently return success results when a configuration change has been made
durable but has not yet been deployed onto the cluster.
"""
app = Klein()
    def __init__(self, persistence_service, cluster_state_service):
        """
        :param ConfigurationPersistenceService persistence_service: Service
            for retrieving and setting desired configuration.
        :param ClusterStateService cluster_state_service: Service that
            knows about the current state of the cluster.
        """
        # Desired configuration and observed cluster state are kept in two
        # separate services; each endpoint reads whichever it needs.
        self.persistence_service = persistence_service
        self.cluster_state_service = cluster_state_service
    @app.route("/version", methods=['GET'])
    @user_documentation(
        u"""
        Get the version of Flocker being run.
        """,
        section=u"common",
        header=u"Get Flocker version",
        examples=[u"get version"])
    @structured(
        inputSchema={},
        outputSchema={'$ref': '/v1/endpoints.json#/definitions/versions'},
        schema_store=SCHEMAS
    )
    def version(self):
        """
        Return the ``flocker`` version string.
        :return: A ``dict`` mapping ``u"flocker"`` to the package version
            (``flocker.__version__``, imported at module level).
        """
        return {u"flocker": __version__}
    @app.route("/configuration/datasets", methods=['GET'])
    @user_documentation(
        u"""
        Get the cluster's dataset configuration.
        """,
        header=u"Get the cluster's dataset configuration",
        examples=[u"get configured datasets"],
        section=u"dataset",
    )
    @structured(
        inputSchema={},
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_datasets_list',
        },
        schema_store=SCHEMAS,
    )
    def get_dataset_configuration(self):
        """
        Get the configured datasets.
        :return: A ``list`` of ``dict`` representing each of dataset
            that is configured to exist anywhere on the cluster.
        """
        # Desired configuration only; actual dataset state is served by
        # ``state_datasets``.
        return list(datasets_from_deployment(self.persistence_service.get()))
    @app.route("/configuration/datasets", methods=['POST'])
    @user_documentation(
        u"""
        Create a new dataset.
        """,
        header=u"Create new dataset",
        examples=[
            u"create dataset",
            u"create dataset with dataset_id",
            u"create dataset with duplicate dataset_id",
            u"create dataset with maximum_size",
            u"create dataset with metadata",
        ],
        section=u"dataset",
    )
    @structured(
        inputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_datasets_create'
        },
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_datasets'},
        schema_store=SCHEMAS
    )
    def create_dataset_configuration(self, primary, dataset_id=None,
                                     maximum_size=None, metadata=None):
        """
        Create a new dataset in the cluster configuration.
        :param unicode primary: The UUID of the node on which the primary
            manifestation of the dataset will be created.
        :param unicode dataset_id: A unique identifier to assign to the
            dataset. This is a string giving a UUID (per RFC 4122). If no
            value is given, one will be generated and returned in the response.
            This is not for easy human use. For human-friendly identifiers,
            use items in ``metadata``.
        :param maximum_size: Either the maximum number of bytes the dataset
            will be capable of storing or ``None`` to make the dataset size
            unlimited. This may be optional or required depending on the
            dataset backend.
        :param dict metadata: A small collection of unicode key/value pairs to
            associate with the dataset. These items are not interpreted. They
            are only stored and made available for later retrieval. Use this
            for things like human-friendly dataset naming, ownership
            information, etc.
        :return: A ``dict`` describing the dataset which has been added to the
            cluster configuration or giving error information if this is not
            possible.
        """
        if dataset_id is None:
            dataset_id = unicode(uuid4())
        # Normalize so that dataset ID comparisons are case-insensitive.
        dataset_id = dataset_id.lower()
        if metadata is None:
            metadata = {}
        primary = UUID(hex=primary)
        # Use persistence_service to get a Deployment for the cluster
        # configuration.
        deployment = self.persistence_service.get()
        # Reject IDs already used anywhere in the cluster configuration.
        for node in deployment.nodes:
            for manifestation in node.manifestations.values():
                if manifestation.dataset.dataset_id == dataset_id:
                    raise DATASET_ID_COLLISION
        # XXX Check cluster state to determine if the given primary node
        # actually exists. If not, raise PRIMARY_NODE_NOT_FOUND.
        # See FLOC-1278
        dataset = Dataset(
            dataset_id=dataset_id,
            maximum_size=maximum_size,
            metadata=pmap(metadata)
        )
        manifestation = Manifestation(dataset=dataset, primary=True)
        primary_node = deployment.get_node(primary)
        # Insert the manifestation into the (immutable) node and swap the
        # updated node into a new deployment.
        new_node_config = primary_node.transform(
            ("manifestations", manifestation.dataset_id), manifestation)
        new_deployment = deployment.update_node(new_node_config)
        saving = self.persistence_service.save(new_deployment)
        # Respond only once the configuration change is durable.
        def saved(ignored):
            result = api_dataset_from_dataset_and_node(dataset, primary)
            return EndpointResponse(CREATED, result)
        saving.addCallback(saved)
        return saving
    @app.route("/configuration/datasets/<dataset_id>", methods=['DELETE'])
    @user_documentation(
        u"""
        Deletion is idempotent: deleting a dataset multiple times will
        result in the same response.
        """,
        header=u"Delete an existing dataset",
        examples=[
            u"delete dataset",
            u"delete dataset with unknown dataset id",
        ],
        section=u"dataset",
    )
    @structured(
        inputSchema={},
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_datasets'},
        schema_store=SCHEMAS
    )
    def delete_dataset(self, dataset_id):
        """
        Delete an existing dataset in the cluster configuration.
        The dataset is only *marked* deleted here; actual removal happens
        elsewhere once agents act on the configuration.
        :param unicode dataset_id: The unique identifier of the dataset. This
            is a string giving a UUID (per RFC 4122).
        :return: A ``dict`` describing the dataset which has been marked
            as deleted in the cluster configuration or giving error
            information if this is not possible.
        """
        # Get the current configuration.
        deployment = self.persistence_service.get()
        # XXX this doesn't handle replicas
        # https://clusterhq.atlassian.net/browse/FLOC-1240
        # The lookup also validates existence: it raises DATASET_NOT_FOUND
        # for an unknown dataset_id.  The manifestation itself is unused.
        old_manifestation, origin_node = _find_manifestation_and_node(
            deployment, dataset_id)
        # Flip the dataset's ``deleted`` flag in place on its node.
        new_node = origin_node.transform(
            ("manifestations", dataset_id, "dataset", "deleted"), True)
        deployment = deployment.update_node(new_node)
        saving = self.persistence_service.save(deployment)
        def saved(ignored):
            result = api_dataset_from_dataset_and_node(
                new_node.manifestations[dataset_id].dataset, new_node.uuid,
            )
            return EndpointResponse(OK, result)
        saving.addCallback(saved)
        return saving
    @app.route("/configuration/datasets/<dataset_id>", methods=['POST'])
    @user_documentation(
        u"""
        This can be used to:
        * Move a dataset from one node to another by changing the
          ``primary`` attribute.
        * In the future update metadata.
        """,
        header=u"Update an existing dataset",
        examples=[
            u"update dataset with primary",
            u"update dataset with unknown dataset id",
        ],
        section=u"dataset",
    )
    @structured(
        inputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_datasets_update'},
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_datasets'},
        schema_store=SCHEMAS
    )
    def update_dataset(self, dataset_id, primary=None):
        """
        Update an existing dataset in the cluster configuration.
        :param unicode dataset_id: The unique identifier of the dataset. This
            is a string giving a UUID (per RFC 4122).
        :param primary: The UUID of the node to which the dataset will be
            moved, or ``None`` indicating no change.
        :return: A ``dict`` describing the dataset which has been added to the
            cluster configuration or giving error information if this is not
            possible.
        """
        # Get the current configuration.
        deployment = self.persistence_service.get()
        # Raises DATASET_NOT_FOUND if the ``dataset_id`` is not found.
        primary_manifestation, current_node = _find_manifestation_and_node(
            deployment, dataset_id
        )
        if primary_manifestation.dataset.deleted:
            # Deleted datasets may not be moved or otherwise updated.
            raise DATASET_DELETED
        if primary is not None:
            deployment = _update_dataset_primary(
                deployment, dataset_id, UUID(hex=primary)
            )
        saving = self.persistence_service.save(deployment)
        # Re-resolve after the (possible) move so the response reflects the
        # dataset's new primary node.
        primary_manifestation, current_node = _find_manifestation_and_node(
            deployment, dataset_id
        )
        # Return an API response dictionary containing the dataset with updated
        # primary address.
        def saved(ignored):
            result = api_dataset_from_dataset_and_node(
                primary_manifestation.dataset,
                current_node.uuid,
            )
            return EndpointResponse(OK, result)
        saving.addCallback(saved)
        return saving
@app.route("/state/datasets", methods=['GET'])
@user_documentation(
u"""
The result reflects the control service's knowledge, which may be
out of date or incomplete. E.g. a dataset agent has not connected
or updated the control service yet.
""",
header=u"Get current cluster datasets",
examples=[u"get state datasets"],
section=u"dataset",
)
@structured(
inputSchema={},
outputSchema={
'$ref': '/v1/endpoints.json#/definitions/state_datasets_array'
},
schema_store=SCHEMAS
)
def state_datasets(self):
"""
Return all primary manifest datasets and all non-manifest datasets in
the cluster.
:return: A ``list`` containing all datasets in the cluster.
"""
# XXX This duplicates code in datasets_from_deployment, but that
# function is designed to operate on a Deployment rather than a
# DeploymentState instance and the dataset configuration result
# includes metadata and deleted flags which should not be part of the
# dataset state response.
# Refactor. See FLOC-2207.
response = []
deployment_state = self.cluster_state_service.as_deployment()
get_manifestation_path = self.cluster_state_service.manifestation_path
for dataset, node in deployment_state.all_datasets():
response_dataset = dict(
dataset_id=dataset.dataset_id,
)
if node is not None:
response_dataset[u"primary"] = unicode(node.uuid)
response_dataset[u"path"] = get_manifestation_path(
node.uuid,
dataset[u"dataset_id"]
).path.decode("utf-8")
if dataset.maximum_size is not None:
response_dataset[u"maximum_size"] = dataset.maximum_size
response.append(response_dataset)
return response
    @app.route("/configuration/containers", methods=['GET'])
    @user_documentation(
        u"""
        These containers may or may not actually exist on the
        cluster.
        """,
        header=u"Get the cluster's container configuration",
        examples=[u"get configured containers"],
        section=u"container",
    )
    @structured(
        inputSchema={},
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_containers_array',
        },
        schema_store=SCHEMAS,
    )
    def get_containers_configuration(self):
        """
        Get the configured containers.
        :return: A ``list`` of ``dict`` representing each of the containers
            that are configured to exist anywhere on the cluster.
        """
        # Desired configuration only; actual container state is served by
        # ``get_containers_state``.
        return list(containers_from_deployment(self.persistence_service.get()))
    @app.route("/state/containers", methods=['GET'])
    @user_documentation(
        u"""
        This reflects the control service's knowledge of the cluster,
        which may be out of date or incomplete, e.g. if a container agent
        has not connected or updated the control service yet.
        """,
        header=u"Get the cluster's actual containers",
        examples=[u"get actual containers"],
        section=u"container",
    )
    @structured(
        inputSchema={},
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/state_containers_array',
        },
        schema_store=SCHEMAS,
    )
    def get_containers_state(self):
        """
        Get the containers present in the cluster.
        :return: A ``list`` of ``dict`` representing each of the containers
            known to actually exist on the cluster, each including a
            ``running`` flag.
        """
        result = []
        deployment_state = self.cluster_state_service.as_deployment()
        for node in deployment_state.nodes:
            if node.applications is None:
                # No application state reported for this node yet; skip it.
                continue
            for application in node.applications:
                container = container_configuration_response(
                    application, node.uuid)
                container[u"running"] = application.running
                result.append(container)
        return result
    def _get_attached_volume(self, node_uuid, volume):
        """
        Create an ``AttachedVolume`` given a volume dictionary.
        :param UUID node_uuid: The node where the volume should be.
        :param dict volume: Parameters for specific volume passed to creation
            endpoint.
        :return AttachedVolume: Corresponding instance.
        :raise: ``DATASET_NOT_FOUND`` if the dataset does not exist (or only
            deleted copies exist), ``DATASET_ON_DIFFERENT_NODE`` if it has no
            manifestation on ``node_uuid``, ``DATASET_IN_USE`` if another
            configured container already uses it.
        """
        deployment = self.persistence_service.get()
        instances = list(manifestations_from_deployment(
            deployment, volume[u"dataset_id"]))
        # The dataset must exist and not be marked as deleted.
        if not any(m for (m, _) in instances if not m.dataset.deleted):
            raise DATASET_NOT_FOUND
        # The dataset must have a manifestation on the requested node.
        if not any(n for (_, n) in instances if n.uuid == node_uuid):
            raise DATASET_ON_DIFFERENT_NODE
        # Only one container may use a given dataset at a time.
        if any(app for app in deployment.applications() if
               app.volume and
               app.volume.manifestation.dataset_id == volume[u"dataset_id"]):
            raise DATASET_IN_USE
        return AttachedVolume(
            # Pick the primary manifestation on the requested node.
            manifestation=[m for (m, node) in instances
                           if node.uuid == node_uuid and m.primary][0],
            mountpoint=FilePath(volume[u"mountpoint"].encode("utf-8")))
    @app.route("/configuration/containers", methods=['POST'])
    @user_documentation(
        u"""
        The container will be automatically started once it is created on
        the cluster.
        """,
        header=u"Add a new container to the configuration",
        examples=[
            u"create container",
            u"create container with duplicate name",
            u"create container with ports",
            u"create container with environment",
            u"create container with attached volume",
            u"create container with cpu shares",
            u"create container with memory limit",
            u"create container with links",
            u"create container with command line",
            # No example of creating a container with a different restart
            # policy because only the "never" policy is supported. See
            # FLOC-2449.
        ],
        section=u"container",
    )
    @structured(
        inputSchema={
            '$ref': '/v1/endpoints.json#/definitions/configuration_container'},
        outputSchema={
            '$ref': '/v1/endpoints.json#/definitions/configuration_container'},
        schema_store=SCHEMAS
    )
    def create_container_configuration(
            self, node_uuid, name, image, ports=(), environment=None,
            restart_policy=None, cpu_shares=None, memory_limit=None,
            links=(), volumes=(), command_line=None,
    ):
        """
        Create a new container in the cluster configuration.
        :param unicode node_uuid: The UUID of the node on which the container
            will run.
        :param unicode name: A unique identifier for the container within
            the Flocker cluster.
        :param unicode image: The name of the Docker image to use for the
            container.
        :param list ports: A ``list`` of ``dict`` objects, mapping internal
            to external ports for the container.
        :param dict environment: A ``dict`` of key/value pairs to be supplied
            to the container as environment variables. Keys and values must be
            ``unicode``.
        :param dict restart_policy: A restart policy for the container, this
            is a ``dict`` with at a minimum a "name" key, whose value must be
            one of "always", "never" or "on-failure". If the "name" is given
            as "on-failure", there may also be another optional key
            "maximum_retry_count", containing a positive ``int`` specifying
            the maximum number of times we should attempt to restart a failed
            container.
        :param volumes: A iterable of ``dict`` with ``"dataset_id"`` and
            ``"mountpoint"`` keys.
        :param int cpu_shares: A positive integer specifying the relative
            weighting of CPU cycles for this container (see Docker's run
            reference for further information).
        :param int memory_limit: A positive integer specifying the maximum
            amount of memory in bytes available to this container.
        :param list links: A ``list`` of ``dict`` objects, mapping container
            links via "alias", "local_port" and "remote_port" values.
        :param command_line: If not ``None``, the command line to use when
            running the Docker image's entry point.
        :return: An ``EndpointResponse`` describing the container which has
            been added to the cluster configuration.
        """
        deployment = self.persistence_service.get()
        node_uuid = UUID(hex=node_uuid)
        # Check if container by this name already exists, if it does
        # return error.
        for node in deployment.nodes:
            for application in node.applications:
                if application.name == name:
                    raise CONTAINER_NAME_COLLISION
        # Find the volume, if any; currently we only support one volume
        # https://clusterhq.atlassian.net/browse/FLOC-49
        attached_volume = None
        if volumes:
            attached_volume = self._get_attached_volume(node_uuid, volumes[0])
        # Find the node.
        node = deployment.get_node(node_uuid)
        # Check if we have any ports in the request. If we do, check existing
        # external ports exposed to ensure there is no conflict. If there is a
        # conflict, return an error.
        for port in ports:
            for current_node in deployment.nodes:
                for application in current_node.applications:
                    for application_port in application.ports:
                        if application_port.external_port == port['external']:
                            raise CONTAINER_PORT_COLLISION
        # If links are present, check that there are no conflicts in local
        # ports or alias names.
        link_aliases = set()
        link_local_ports = set()
        application_links = set()
        for link in links:
            if link['alias'] in link_aliases:
                raise LINK_ALIAS_COLLISION
            if link['local_port'] in link_local_ports:
                raise LINK_PORT_COLLISION
            link_aliases.add(link['alias'])
            link_local_ports.add(link['local_port'])
            application_links.add(
                Link(
                    alias=link['alias'], local_port=link['local_port'],
                    remote_port=link['remote_port']
                )
            )
        # If we have ports specified, add these to the Application instance.
        application_ports = []
        for port in ports:
            application_ports.append(Port(
                internal_port=port['internal'],
                external_port=port['external']
            ))
        if environment is not None:
            # Stored as a frozenset of (key, value) pairs.
            environment = frozenset(environment.items())
        if restart_policy is None:
            restart_policy = dict(name=u"never")
        # Map the JSON policy description onto a policy instance; the
        # remaining keys (e.g. "maximum_retry_count") become constructor
        # arguments of the chosen policy class.
        policy_name = restart_policy.pop("name")
        policy_factory = FLOCKER_RESTART_POLICY_NAME_TO_POLICY[policy_name]
        policy = policy_factory(**restart_policy)
        # Create Application object, add to Deployment, save.
        application = Application(
            name=name,
            image=DockerImage.from_string(image),
            ports=frozenset(application_ports),
            environment=environment,
            volume=attached_volume,
            restart_policy=policy,
            cpu_shares=cpu_shares,
            memory_limit=memory_limit,
            links=application_links,
            command_line=command_line,
        )
        new_node_config = node.transform(
            ["applications"],
            lambda s: s.add(application)
        )
        new_deployment = deployment.update_node(new_node_config)
        saving = self.persistence_service.save(new_deployment)
        # Return passed in dictionary with CREATED response code.
        def saved(_):
            result = container_configuration_response(application, node_uuid)
            return EndpointResponse(CREATED, result)
        saving.addCallback(saved)
        return saving
    @app.route("/configuration/containers/<name>", methods=['POST'])
    @user_documentation(
        u"""
        This will lead to the container being relocated to the specified host
        and restarted. This will also update the primary host of any attached
        datasets.
        """,
        header=u"Update a named container's configuration",
        examples=[u"move container"],
        section=u"container",
    )
    @structured(
        inputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_container_update',
        },
        outputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_container',
        },
        schema_store=SCHEMAS,
    )
    def update_containers_configuration(self, name, node_uuid):
        """
        Update the specified container's configuration.
        :param unicode name: A unique identifier for the container within
            the Flocker cluster.
        :param unicode node_uuid: The address of the node on which the
            container will run.
        :return: An ``EndpointResponse`` describing the container which has
            been updated.
        :raise: ``CONTAINER_NOT_FOUND`` when no configured container is
            named ``name``.
        """
        deployment = self.persistence_service.get()
        node_uuid = UUID(hex=node_uuid)
        target_node = deployment.get_node(node_uuid)
        for node in deployment.nodes:
            for application in node.applications:
                if application.name == name:
                    # Found the container: move it, persist, and return the
                    # saved response from inside the loop.
                    deployment = deployment.move_application(
                        application, target_node
                    )
                    saving = self.persistence_service.save(deployment)
                    def saved(_):
                        result = container_configuration_response(
                            application, node_uuid
                        )
                        return EndpointResponse(OK, result)
                    saving.addCallback(saved)
                    return saving
        # Didn't find the application:
        raise CONTAINER_NOT_FOUND
    @app.route("/configuration/containers/<name>", methods=['DELETE'])
    @user_documentation(
        u"""
        This will lead to the container being stopped and not being
        restarted again. Any datasets that were attached as volumes will
        continue to exist on the cluster.
        """,
        header=u"Remove a container from the configuration",
        examples=[
            u"remove a container",
            u"remove a container with unknown name",
        ],
        section=u"container",
    )
    @structured(
        inputSchema={},
        outputSchema={},
        schema_store=SCHEMAS
    )
    def delete_container_configuration(self, name):
        """
        Remove a container from the cluster configuration.
        :param unicode name: A unique identifier for the container within
            the Flocker cluster.
        :return: An ``EndpointResponse``.
        :raise: ``CONTAINER_NOT_FOUND`` when no configured container is
            named ``name``.
        """
        deployment = self.persistence_service.get()
        for node in deployment.nodes:
            for application in node.applications:
                if application.name == name:
                    updated_node = node.transform(
                        ["applications"], lambda s: s.remove(application))
                    d = self.persistence_service.save(
                        deployment.update_node(updated_node))
                    # Successful deletion produces an empty response body.
                    d.addCallback(lambda _: None)
                    return d
        # Didn't find the application:
        raise CONTAINER_NOT_FOUND
    @app.route("/state/nodes", methods=['GET'])
    @user_documentation(
        u"""
        Some nodes may not be listed if their agents are disconnected from
        the cluster. IP addresses may be private IP addresses that are not
        publicly routable.
        """,
        header=u"List known nodes in the cluster",
        examples=[
            u"list known nodes",
        ],
        section=u"common",
    )
    @structured(
        inputSchema={},
        outputSchema={"$ref":
                      '/v1/endpoints.json#/definitions/nodes_array'},
        schema_store=SCHEMAS
    )
    def list_current_nodes(self):
        """
        List the nodes currently known to the cluster.
        :return: A ``list`` of ``dict``, each giving the ``host`` (the node's
            hostname/address) and ``uuid`` of one known node.
        """
        return [{u"host": node.hostname, u"uuid": unicode(node.uuid)}
                for node in
                self.cluster_state_service.as_deployment().nodes]
    @app.route("/configuration/_compose", methods=['POST'])
    @private_api
    @structured(
        inputSchema={
            '$ref':
            '/v1/endpoints.json#/definitions/configuration_compose'
        },
        outputSchema={},
        schema_store=SCHEMAS
    )
    def replace_configuration(self, applications, deployment):
        """
        Replace the existing configuration with one given by flocker-deploy
        command line tool.
        :param applications: Configuration in Flocker-native or
            Fig/Compose format.
        :param deployment: Configuration of which applications run on
            which nodes.
        """
        try:
            # Try Fig/Compose format first; fall back to the Flocker-native
            # configuration format if it does not match.
            configuration = FigConfiguration(applications)
            if not configuration.is_valid_format():
                configuration = FlockerConfiguration(applications)
            return self.persistence_service.save(model_from_configuration(
                deployment_state=self.cluster_state_service.as_deployment(),
                applications=configuration.applications(),
                deployment_configuration=deployment))
        except ConfigurationError as e:
            # Surface parse/validation failures to the caller as a 400.
            raise make_bad_request(code=BAD_REQUEST, description=unicode(e))
def _find_manifestation_and_node(deployment, dataset_id):
    """
    Given the ID of a dataset, find its primary manifestation and the node
    it's on.
    :param unicode dataset_id: The unique identifier of the dataset. This
        is a string giving a UUID (per RFC 4122).
    :return: Tuple containing the primary ``Manifestation`` and the
        ``Node`` it is on.
    :raise: ``DATASET_NOT_FOUND`` if no manifestation of the dataset exists
        anywhere in the deployment; ``IndexError`` if manifestations exist
        but none of them is primary.
    """
    manifestations_and_nodes = manifestations_from_deployment(
        deployment, dataset_id)
    # Track whether we saw any manifestation at all.  The previous
    # implementation reused the enumerate() index as this sentinel, which
    # mis-reported a deployment containing exactly one *non-primary*
    # manifestation (index stayed 0) as DATASET_NOT_FOUND instead of the
    # intended "no primary manifestations" IndexError.
    found_any = False
    for manifestation, node in manifestations_and_nodes:
        found_any = True
        if manifestation.primary:
            return manifestation, node
    if not found_any:
        # There are no manifestations containing the requested dataset.
        raise DATASET_NOT_FOUND
    # There were manifestations, but no primary ones.
    raise IndexError(
        'No primary manifestations for dataset: {!r}. See '
        'https://clusterhq.atlassian.net/browse/FLOC-1403'.format(
            dataset_id)
    )
def _update_dataset_primary(deployment, dataset_id, primary):
    """
    Update the ``deployment`` so that the ``Dataset`` with the supplied
    ``dataset_id`` is on the ``Node`` with the supplied ``primary`` address.
    :param Deployment deployment: The deployment containing the dataset to be
        updated.
    :param unicode dataset_id: The ID of the dataset to be updated.
    :param UUID primary: The UUID of the new primary node of the
        supplied ``dataset_id``.
    :returns: An updated ``Deployment``.
    """
    primary_manifestation, old_primary_node = _find_manifestation_and_node(
        deployment, dataset_id
    )
    # Now construct a new_deployment where the primary manifestation of the
    # dataset is on the requested primary node.
    # ``discard`` removes the manifestation entry from the old node.
    old_primary_node = old_primary_node.transform(
        ("manifestations", primary_manifestation.dataset_id), discard
    )
    deployment = deployment.update_node(old_primary_node)
    # Re-attach the same manifestation object to the new primary node.
    new_primary_node = deployment.get_node(primary)
    new_primary_node = new_primary_node.transform(
        ("manifestations", dataset_id), primary_manifestation
    )
    deployment = deployment.update_node(new_primary_node)
    return deployment
def _update_dataset_maximum_size(deployment, dataset_id, maximum_size):
    """
    Update the ``deployment`` so that the ``Dataset`` with the supplied
    ``dataset_id`` has the supplied ``maximum_size``.
    :param Deployment deployment: The deployment containing the dataset to be
        updated.
    :param unicode dataset_id: The ID of the dataset to be updated.
    :param maximum_size: The new size of the dataset or ``None`` to remove the
        size limit.
    :returns: An updated ``Deployment``.
    """
    # The lookup also validates that the dataset exists.
    manifestation, node = _find_manifestation_and_node(deployment, dataset_id)
    # ``nodes`` is replaced wholesale: remove the old node, rewrite its
    # manifestation, then add the updated node back (``discard``/``add``
    # return new collections rather than mutating).
    deployment = deployment.set(nodes=deployment.nodes.discard(node))
    node = node.transform(
        ['manifestations', dataset_id, 'dataset', 'maximum_size'],
        maximum_size
    )
    return deployment.set(nodes=deployment.nodes.add(node))
def manifestations_from_deployment(deployment, dataset_id):
    """
    Extract all other manifestations of the supplied dataset_id from the
    supplied deployment.
    :param Deployment deployment: A ``Deployment`` describing the state
        of the cluster.
    :param unicode dataset_id: The uuid of the ``Dataset`` for the
        ``Manifestation`` s that are to be returned.
    :return: Iterable returning all manifestations of the supplied
        ``dataset_id``, as ``(manifestation, node)`` pairs.
    """
    # Lazily yield a pair for every node that holds this dataset.
    return (
        (cluster_node.manifestations[dataset_id], cluster_node)
        for cluster_node in deployment.nodes
        if dataset_id in cluster_node.manifestations
    )
def datasets_from_deployment(deployment):
    """
    Extract the primary datasets from the supplied deployment instance.
    Currently does not support secondary datasets, but this info might be
    useful to provide. For ZFS, for example, may show how up-to-date they
    are with respect to the primary.
    :param Deployment deployment: A ``Deployment`` describing the state
        of the cluster.
    :return: Iterable returning all datasets (as API ``dict`` objects,
        via ``api_dataset_from_dataset_and_node``).
    """
    for node in deployment.nodes:
        if node.manifestations is None:
            # Nodes may carry no manifestation information at all
            # (presumably state not yet reported — compare the similar
            # ``applications is None`` check in get_containers_state).
            continue
        for manifestation in node.manifestations.values():
            if manifestation.primary:
                # There may be multiple datasets marked as primary until we
                # implement consistency checking when state is reported by each
                # node.
                # See https://clusterhq.atlassian.net/browse/FLOC-1303
                yield api_dataset_from_dataset_and_node(
                    manifestation.dataset, node.uuid
                )
def containers_from_deployment(deployment):
    """
    Extract the containers from the supplied deployment instance.
    :param Deployment deployment: A ``Deployment`` describing the state
        of the cluster.
    :return: Iterable returning all containers.
    """
    # One response dict per application, lazily, across every node.
    return (
        container_configuration_response(app, cluster_node.uuid)
        for cluster_node in deployment.nodes
        for app in cluster_node.applications
    )
def container_configuration_response(application, node):
    """
    Return a container dict which conforms to
    ``/v1/endpoints.json#/definitions/configuration_container``
    :param Application application: An ``Application`` instance.
    :param UUID node: The host on which this application is running.
    :return: A ``dict`` containing the container configuration.
    """
    result = {
        "node_uuid": unicode(node), "name": application.name,
    }
    result.update(ApplicationMarshaller(application).convert())
    # Configuration format isn't quite the same as JSON format:
    if u"volume" in result:
        # Config format includes maximum_size, which we don't want:
        volume = result.pop(u"volume")
        result[u"volumes"] = [{u"dataset_id": volume[u"dataset_id"],
                               u"mountpoint": volume[u"mountpoint"]}]
    # Optional scalar attributes are only included when actually set:
    if application.cpu_shares is not None:
        result["cpu_shares"] = application.cpu_shares
    if application.memory_limit is not None:
        result["memory_limit"] = application.memory_limit
    if application.command_line is not None:
        result["command_line"] = list(application.command_line)
    return result
def api_dataset_from_dataset_and_node(dataset, node_uuid):
    """
    Return a dataset dict which conforms to
    ``/v1/endpoints.json#/definitions/configuration_datasets_array``
    :param Dataset dataset: A dataset present in the cluster.
    :param UUID node_uuid: UUID of the primary node for the
        `dataset`.
    :return: A ``dict`` containing the dataset information and the
        hostname of the primary node, conforming to
        ``/v1/endpoints.json#/definitions/configuration_datasets_array``.
    """
    result = dict(
        dataset_id=dataset.dataset_id,
        deleted=dataset.deleted,
        primary=unicode(node_uuid),
        # ``thaw`` converts the persistent metadata map into a plain dict.
        metadata=thaw(dataset.metadata)
    )
    if dataset.maximum_size is not None:
        # Omitted entirely when the dataset size is unlimited.
        result[u'maximum_size'] = dataset.maximum_size
    return result
def create_api_service(persistence_service, cluster_state_service, endpoint,
                       context_factory):
    """
    Create a Twisted Service that serves the API on the given endpoint.
    :param ConfigurationPersistenceService persistence_service: Service
        for retrieving and setting desired configuration.
    :param ClusterStateService cluster_state_service: Service that
        knows about the current state of the cluster.
    :param endpoint: Twisted endpoint to listen on.
    :param context_factory: TLS context factory.
    :return: Service that will listen on the endpoint using HTTP API server.
    """
    api_root = Resource()
    user = ConfigurationAPIUserV1(persistence_service, cluster_state_service)
    # All endpoints are mounted under the /v1 prefix.
    api_root.putChild('v1', user.app.resource())
    api_root._v1_user = user # For unit testing purposes, alas
    return StreamServerEndpointService(
        endpoint,
        TLSMemoryBIOFactory(
            context_factory,
            # isClient=False: wrap the HTTP site as a TLS *server*.
            False,
            Site(api_root)
        )
    )
| apache-2.0 |
Forever-Young/pytils | pytils/utils.py | 2 | 1782 | # -*- coding: utf-8 -*-
# -*- test-case-name: pytils.test.test_utils -*-
"""
Misc utils for internal use
"""
from pytils.third import six
def check_length(value, length):
    """
    Checks length of value

    @param value: value to check
    @type value: C{str}

    @param length: length checking for
    @type length: C{int}

    @return: None when check successful

    @raise ValueError: check failed
    """
    actual = len(value)
    if actual == length:
        return
    raise ValueError("length must be %d, not %d" % (length, actual))
def check_positive(value, strict=False):
    """
    Checks if variable is positive

    @param value: value to check
    @type value: C{integer types}, C{float} or C{Decimal}

    @param strict: when True, zero is rejected as well (default: False,
        i.e. zero is accepted)
    @type strict: C{bool}

    @return: None when check successful

    @raise ValueError: check failed
    """
    if not strict and value < 0:
        raise ValueError("Value must be positive or zero, not %s" % str(value))
    if strict and value <= 0:
        raise ValueError("Value must be positive, not %s" % str(value))
def split_values(ustring, sep=u','):
    """
    Splits unicode string with separator C{sep},
    but skips escaped separator.

    @param ustring: string to split
    @type ustring: C{unicode}

    @param sep: separator (default to u',')
    @type sep: C{unicode}

    @return: tuple of splitted elements
    """
    assert isinstance(ustring, six.text_type), "uvalue must be unicode, not %s" % type(ustring)
    # unicode have special mark symbol 0xffff which cannot be used in a regular text,
    # so we use it to mark a place where escaped column was
    # NOTE: the backslash is now written as ``\\`` — ``'\,'`` relied on an
    # unrecognized escape sequence (a DeprecationWarning/SyntaxWarning in
    # modern Python); the resulting string value is unchanged.
    ustring_marked = ustring.replace(u'\\,', u'\uffff')
    items = tuple([i.strip().replace(u'\uffff', u',') for i in ustring_marked.split(sep)])
    return items
| mit |
henry808/euler | 013/eul013.py | 1 | 5836 | #! /usr/bin/python
from __future__ import print_function
# Project Euler # 13
n = [37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676,
89261670696623633820136378418383684178734361726757,
28112879812849979408065481931592621691275889832738,
44274228917432520321923589422876796487670272189318,
47451445736001306439091167216856844588711603153276,
70386486105843025439939619828917593665686757934951,
62176457141856560629502157223196586755079324193331,
64906352462741904929101432445813822663347944758178,
92575867718337217661963751590579239728245598838407,
58203565325359399008402633568948830189458628227828,
80181199384826282014278194139940567587151170094390,
35398664372827112653829987240784473053190104293586,
86515506006295864861532075273371959191420517255829,
71693888707715466499115593487603532921714970056938,
54370070576826684624621495650076471787294438377604,
53282654108756828443191190634694037855217779295145,
36123272525000296071075082563815656710885258350721,
45876576172410976447339110607218265236877223636045,
17423706905851860660448207621209813287860733969412,
81142660418086830619328460811191061556940512689692,
51934325451728388641918047049293215058642563049483,
62467221648435076201727918039944693004732956340691,
15732444386908125794514089057706229429197107928209,
55037687525678773091862540744969844508330393682126,
18336384825330154686196124348767681297534375946515,
80386287592878490201521685554828717201219257766954,
78182833757993103614740356856449095527097864797581,
16726320100436897842553539920931837441497806860984,
48403098129077791799088218795327364475675590848030,
87086987551392711854517078544161852424320693150332,
59959406895756536782107074926966537676326235447210,
69793950679652694742597709739166693763042633987085,
41052684708299085211399427365734116182760315001271,
65378607361501080857009149939512557028198746004375,
35829035317434717326932123578154982629742552737307,
94953759765105305946966067683156574377167401875275,
88902802571733229619176668713819931811048770190271,
25267680276078003013678680992525463401061632866526,
36270218540497705585629946580636237993140746255962,
24074486908231174977792365466257246923322810917141,
91430288197103288597806669760892938638285025333403,
34413065578016127815921815005561868836468420090470,
23053081172816430487623791969842487255036638784583,
11487696932154902810424020138335124462181441773470,
63783299490636259666498587618221225225512486764533,
67720186971698544312419572409913959008952310058822,
95548255300263520781532296796249481641953868218774,
76085327132285723110424803456124867697064507995236,
37774242535411291684276865538926205024910326572967,
23701913275725675285653248258265463092207058596522,
29798860272258331913126375147341994889534765745501,
18495701454879288984856827726077713721403798879715,
38298203783031473527721580348144513491373226651381,
34829543829199918180278916522431027392251122869539,
40957953066405232632538044100059654939159879593635,
29746152185502371307642255121183693803580388584903,
41698116222072977186158236678424689157993532961922,
62467957194401269043877107275048102390895523597457,
23189706772547915061505504953922979530901129967519,
86188088225875314529584099251203829009407770775672,
11306739708304724483816533873502340845647058077308,
82959174767140363198008187129011875491310547126581,
97623331044818386269515456334926366572897563400500,
42846280183517070527831839425882145521227251250327,
55121603546981200581762165212827652751691296897789,
32238195734329339946437501907836945765883352399886,
75506164965184775180738168837861091527357929701337,
62177842752192623401942399639168044983993173312731,
32924185707147349566916674687634660915035914677504,
99518671430235219628894890102423325116913619626622,
73267460800591547471830798392868535206946944540724,
76841822524674417161514036427982273348055556214818,
97142617910342598647204516893989422179826088076852,
87783646182799346313767754307809363333018982642090,
10848802521674670883215120185883543223812876952786,
71329612474782464538636993009049310363619763878039,
62184073572399794223406235393808339651327408011116,
66627891981488087797941876876144230030984490851411,
60661826293682836764744779239180335110989069790714,
85786944089552990653640447425576083659976645795096,
66024396409905389607120198219976047599490197230297,
64913982680032973156037120041377903785566085089252,
16730939319872750275468906903707539413042652315011,
94809377245048795150954100921645863754710598436791,
78639167021187492431995700641917969777599028300699,
15368713711936614952811305876380278410754449733078,
40789923115535562561142322423255033685442488917353,
44889911501440648020369068063960672322193204149535,
41503128880339536053299340368006977710650566631954,
81234880673210146739058568557934581403627822703280,
82616570773948327592232845941706525094512325230608,
22918802058777319719839450180888072429661980811197,
77158542502016545090413245809786882778948721859617,
72107838435069186155435662884062257473692284509516,
20849603980134001723930671666823555245252804609722,
53503534226472524250874054075591789781264330331690]
if __name__ == '__main__':
    # Project Euler 13 answer: the first ten digits of the sum of the
    # one hundred 50-digit numbers stored in ``n`` above.
    digits = str(sum(n))
    print(digits[:10])
| mit |
coxmediagroup/django-threadedcomments | legacy_threadedcomments/utils.py | 15 | 1165 | from django.core.serializers import serialize
from django.http import HttpResponse
from django.utils import simplejson
from django.utils.functional import Promise
from django.utils.encoding import force_unicode
class LazyEncoder(simplejson.JSONEncoder):
    """JSON encoder that resolves Django lazy translation objects (``Promise``)."""
    def default(self, obj):
        if isinstance(obj, Promise):
            return force_unicode(obj)
        # Defer to the base class, which raises a proper TypeError for
        # unserializable objects.  The previous code returned ``obj``
        # unchanged, so the encoder re-encountered the same unserializable
        # value instead of failing with a clear error.
        return super(LazyEncoder, self).default(obj)
class JSONResponse(HttpResponse):
    """
    An ``HttpResponse`` subclass whose body is the JSON serialization of
    the given payload.

    With ``is_iterable`` true the payload is run through Django's model
    serializer; otherwise it is dumped with ``simplejson`` using
    ``LazyEncoder`` (so lazy translation strings are handled).
    """
    def __init__(self, object, is_iterable = True):
        if is_iterable:
            payload = serialize('json', object)
        else:
            payload = simplejson.dumps(object, cls=LazyEncoder)
        super(JSONResponse, self).__init__(payload, mimetype='application/json')
class XMLResponse(HttpResponse):
    """
    A simple subclass of ``HttpResponse`` which makes serializing to XML easy.
    """
    def __init__(self, object, is_iterable = True):
        # Iterable payloads (querysets / lists of model instances) go through
        # Django's XML serializer; anything else is passed through unchanged
        # and is presumably already an XML string -- confirm with callers.
        if is_iterable:
            content = serialize('xml', object)
        else:
            content = object
super(XMLResponse, self).__init__(content, mimetype='application/xml') | bsd-3-clause |
LennonLab/simplex | tools/DiversityTools/macroecotools/macroecotools.py | 12 | 16643 | """Module for conducting standard macroecological plots and analyses"""
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import colorsys
from numpy import log10
import pandas
import numpy
import math
def AIC(k, L):
    """Computes the Akaike Information Criterion.

    Keyword arguments:
    L  --  log likelihood value of given distribution.
    k  --  number of fitted parameters.

    """
    # AIC = 2k - 2L, written in factored form.
    return 2 * (k - L)
def AICc(k, L, n):
    """Computes the corrected Akaike Information Criterion (AICc).

    Keyword arguments:
    L  --  log likelihood value of given distribution.
    k  --  number of fitted parameters.
    n  --  number of observations.

    """
    # AICc adds a small-sample penalty term to the plain AIC.
    small_sample_correction = 2 * k * (k + 1) / (n - k - 1)
    return 2 * k - 2 * L + small_sample_correction
def aic_weight(AICc_list, n, cutoff = 4):
    """Computes Akaike weights for a set of candidate models.

    Based on information from Burnham and Anderson (2002).

    Keyword arguments:
    AICc_list -- list of AICc values, one per model
    n -- number of observations.
    cutoff -- minimum number of observations required to generate a weight.

    Returns an array of weights (summing to 1), or None when n < cutoff.

    """
    if n < cutoff:
        return None
    scores = np.array(AICc_list, dtype=float)
    # Relative likelihood of each model versus the best (minimum AICc) one.
    relative_likelihoods = np.exp(-(scores - scores.min()) / 2)
    return relative_likelihoods / relative_likelihoods.sum()
def get_pred_iterative(cdf_obs, dist, *pars):
    """Function to get predicted abundances (reverse-sorted) for distributions with no analytical ppf."""
    # Numerically inverts the cdf: walk the discrete support (i = 1, 2, ...)
    # accumulating pmf mass, recording the first i whose cumulative mass
    # reaches each observed cdf value (i.e. the quantile of each value).
    cdf_obs = np.sort(cdf_obs)
    abundance = list(np.empty([len(cdf_obs)]))
    j = 0        # index of the next (ascending) cdf value still to satisfy
    cdf_cum = 0  # running cumulative probability mass
    i = 1        # current candidate abundance
    while j < len(cdf_obs):
        cdf_cum += dist.pmf(i, *pars)
        while cdf_cum >= cdf_obs[j]:
            abundance[j] = i
            j += 1
            if j == len(cdf_obs):
                # All quantiles resolved; return abundances largest-first to
                # match a rank-abundance ordering.
                abundance.reverse()
                return np.array(abundance)
        i += 1
def get_rad_from_cdf(dist, S, *args):
    """Return a predicted rank-abundance distribution from a theoretical CDF

    Keyword arguments:
    dist -- a distribution class
    S -- the number of species for which the RAD should be predicted. Should
    match the number of species in the community if comparing to empirical data.
    args -- arguments for dist

    Finds the predicted rank-abundance distribution that results from a
    theoretical cumulative distribution function, by rounding the value of the
    cdf evaluated at 1 / S * (Rank - 0.5) to the nearest integer

    """
    emp_cdf = [(S - i + 0.5) / S for i in range(1, S + 1)]
    try:
        # ppf returns an array here, so round and convert element-wise.
        # The former int(np.round(...)) raised TypeError whenever S > 1,
        # which silently forced every call onto the slow iterative path.
        rad = np.round(dist.ppf(emp_cdf, *args)).astype(int)
    except Exception:
        # Fall back to numerical inversion for distributions without a
        # (working) analytical ppf.
        rad = get_pred_iterative(emp_cdf, dist, *args)
    return np.array(rad)
def get_emp_cdf(dat):
    """Compute the empirical cdf given a list or an array"""
    dat = np.array(dat)
    n = len(dat)
    # For each value, the fraction of observations less than or equal to it.
    return np.array([np.sum(dat <= value) / n for value in dat])
def hist_pmf(xs, pmf, bins):
    """Create a histogram based on a probability mass function

    Creates a histogram by combining the pmf values inside a series of bins

    Args:
        xs: Array-like list of x values that the pmf values come from
        pmf: Array-like list of pmf values associate with the values of x
        bins: Array-like list of bin edges

    Returns:
        hist: Array of values of the histogram
        bin_edges: Array of value of the bin edges

    """
    xs = np.array(xs)
    pmf = np.array(pmf)
    bins = np.array(bins)
    hist = []
    num_bins = len(bins) - 1
    for i in range(num_bins):
        in_bin = xs >= bins[i]
        if i == num_bins - 1:
            # The last bin includes its right edge (matching np.histogram).
            # The previous code compared the loop index against len(bins),
            # which it can never reach, so mass at the final edge value was
            # always dropped.
            in_bin &= xs <= bins[i + 1]
        else:
            in_bin &= xs < bins[i + 1]
        hist.append(pmf[in_bin].sum())
    hist = np.array(hist)
    return (hist, bins)
def plot_rad(Ns):
    """Plot a rank-abundance distribution based on a vector of abundances"""
    # NB: sorts Ns in place (descending) as a side effect on the caller's list.
    Ns.sort(reverse=True)
    rank = range(1, len(Ns) + 1)
    plt.plot(rank, Ns, 'bo-')
    plt.xlabel('Rank')
    plt.ylabel('Abundance')
def get_rad_data(Ns):
    """Provide ranks and relative abundances for a vector of abundances"""
    abunds_desc = np.sort(np.array(Ns))[::-1]
    relab_sorted = abunds_desc / abunds_desc.sum()
    rank = range(1, len(abunds_desc) + 1)
    return (rank, relab_sorted)
def preston_sad(abund_vector, b=None, normalized = 'no'):
    """Plot histogram of species abundances on a log2 scale"""
    if b == None:
        # Default bins: powers of two up to twice the maximum abundance.
        powers_of_two = np.exp2(list(range(0, 25)))
        b = powers_of_two[powers_of_two <= max(abund_vector) * 2]
    if normalized == 'no':
        hist_ab = np.histogram(abund_vector, bins = b)
    if normalized == 'yes':
        counts, edges = np.histogram(abund_vector, bins = b)
        # Divide each count by its bin's left edge (the bin width on the
        # log2 scale) and drop the trailing edge.
        left_edges = b[0:len(counts)]
        hist_ab = (counts / left_edges, edges[0:len(counts)])
    return hist_ab
def plot_SARs(list_of_A_and_S):
    """Plot multiple SARs on a single plot.

    Input: a list of lists, each sublist contains one vector for S and one vector for A.
    Output: a graph with SARs plotted on log-log scale, with colors spanning the spectrum.

    """
    N = len(list_of_A_and_S)
    HSV_tuples = [(x * 1.0 / N, 0.5, 0.5) for x in range(N)]
    # Materialize as a list: under Python 3 map() returns an iterator, so the
    # RGB_tuples[i] indexing below raised TypeError.
    RGB_tuples = [colorsys.hsv_to_rgb(*hsv) for hsv in HSV_tuples]
    for i in range(len(list_of_A_and_S)):
        sublist = list_of_A_and_S[i]
        plt.loglog(sublist[0], sublist[1], color = RGB_tuples[i])
    # NOTE(review): plt.hold() was removed in matplotlib 3.0 -- confirm the
    # target matplotlib version before relying on this call.
    plt.hold(False)
    plt.xlabel('Area')
    plt.ylabel('Richness')
def points_on_circle(center, radius, n = 100):
    """Generate n equally spaced points on a circle, given its center and radius.

    Returns n + 1 (x, y) tuples so that the circle is closed (the first and
    last points coincide up to floating-point rounding).
    """
    x0, y0 = center
    # range() instead of xrange(): xrange does not exist on Python 3.
    points = [(math.cos(2 * math.pi / n * k) * radius + x0,
               math.sin(2 * math.pi / n * k) * radius + y0)
              for k in range(0, n + 1)]
    return points
def count_pts_within_radius(x, y, radius, logscale=0):
    """Count the number of points within a fixed radius in 2D space"""
    #TODO: see if we can improve performance using KDTree.query_ball_point
    #http://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.KDTree.query_ball_point.html
    #instead of doing the subset based on the circle
    unique_points = set(zip(x, y))
    count_data = []
    logx, logy, logr = log10(x), log10(y), log10(radius)
    for px, py in unique_points:
        if logscale == 1:
            # Neighborhood measured in log10 space.
            logpx, logpy = log10(px), log10(py)
            within = ((logx - logpx) ** 2 + (logy - logpy) ** 2) <= logr ** 2
        else:
            within = ((x - px) ** 2 + (y - py) ** 2) <= radius ** 2
        count_data.append((px, py, len(x[within])))
    return count_data
def plot_color_by_pt_dens(x, y, radius, loglog=0, plot_obj=None):
    """Plot bivariate relationships with large n using color for point density

    Inputs:
    x & y -- variables to be plotted
    radius -- the linear distance within which to count points as neighbors
    loglog -- a flag to indicate the use of a loglog plot (loglog = 1)

    The color of each point in the plot is determined by the logarithm (base 10)
    of the number of points that occur with a given radius of the focal point,
    with hotter colors indicating more points. The number of neighboring points
    is determined in linear space regardless of whether a loglog plot is
    presented.
    """
    plot_data = count_pts_within_radius(x, y, radius, loglog)
    # Sort ascending by neighbor count so dense points are drawn last (on top).
    sorted_plot_data = np.array(sorted(plot_data, key=lambda point: point[2]))
    if plot_obj == None:
        plot_obj = plt.axes()
    if loglog == 1:
        plot_obj.set_xscale('log')
        plot_obj.set_yscale('log')
        plot_obj.scatter(sorted_plot_data[:, 0], sorted_plot_data[:, 1],
                         c = np.sqrt(sorted_plot_data[:, 2]), edgecolors='none')
        plot_obj.set_xlim(min(x) * 0.5, max(x) * 2)
        plot_obj.set_ylim(min(y) * 0.5, max(y) * 2)
    else:
        plot_obj.scatter(sorted_plot_data[:, 0], sorted_plot_data[:, 1],
                        c = log10(sorted_plot_data[:, 2]), edgecolors='none')
    return plot_obj
def e_var(abundance_data):
    """Calculate Smith and Wilson's (1996; Oikos 76:70-82) evenness index (Evar)

    Input:
    abundance_data = list of abundance fo all species in a community

    """
    S = len(abundance_data)
    log_abunds = [np.log(n) for n in abundance_data]
    # Mean of the log abundances, built the same way as the original
    # (each term divided by S, then summed) to keep floating point identical.
    mean_log = sum(x / S for x in log_abunds)
    squared_devs = [(x - mean_log) ** 2 for x in log_abunds]
    return 1 - ((2 / np.pi) * np.arctan(sum(squared_devs) / S))
def obs_pred_rsquare(obs, pred):
    """Determines the prop of variability in a data set accounted for by a model

    In other words, this determines the proportion of variation explained by
    the 1:1 line in an observed-predicted plot.

    """
    residual_ss = sum((obs - pred) ** 2)
    total_ss = sum((obs - np.mean(obs)) ** 2)
    return 1 - residual_ss / total_ss
def obs_pred_mse(obs, pred):
    """Calculate the mean squared error of the prediction given observation."""
    squared_errors = (obs - pred) ** 2
    return sum(squared_errors) / len(obs)
def comp_ed (spdata1,abdata1,spdata2,abdata2):
    """Calculate the compositional Euclidean Distance between two sites

    Ref: Thibault KM, White EP, Ernest SKM. 2004. Temporal dynamics in the
    structure and composition of a desert rodent community. Ecology. 85:2649-2655.

    """
    # Work with relative abundances so sites with different totals compare.
    abdata1 = (abdata1 * 1.0) / sum(abdata1)
    abdata2 = (abdata2 * 1.0) / sum(abdata2)
    intersect12 = set(spdata1).intersection(spdata2)
    setdiff12 = np.setdiff1d(spdata1,spdata2)
    setdiff21 = np.setdiff1d(spdata2,spdata1)
    # np.isin replaces np.setmember1d, which was removed from numpy long ago
    # (renamed in1d in numpy 1.4 and later generalized to isin).
    # NOTE(review): shared species are taken in each site's own order; the
    # distance is only meaningful if both sites list the intersection in the
    # same order -- confirm upstream sorting of species IDs.
    relab1 = np.concatenate((abdata1[np.isin(spdata1, list(intersect12))],
                             abdata1[np.isin(spdata1, setdiff12)],
                             np.zeros(len(setdiff21))))
    relab2 = np.concatenate((abdata2[np.isin(spdata2, list(intersect12))],
                             np.zeros(len(setdiff12)),
                             abdata2[np.isin(spdata2, setdiff21)]))
    return np.sqrt(sum((relab1 - relab2) ** 2))
def calc_comp_eds(ifile, fout):
    """Calculate Euclidean distances in species composition across sites.

    Determines the Euclidean distances among all possible pairs of sites and
    saves the results to a file

    Inputs:
    ifile -- ifile = np.genfromtxt(input_filename, dtype = "S15,S15,i8",
                   names = ['site','species','ab'], delimiter = ",")
    fout -- fout = csv.writer(open(output_filename,'ab'))

    """
    #TODO - Remove reliance on on names of columns in input
    #       Possibly move to 3 1-D arrays for input rather than the 2-D with 3 columns
    #       Return result rather than writing to file
    usites = np.sort(list(set(ifile["site"])))
    # Every unordered pair of distinct sites (i < a): one distance per pair.
    for i in range (0, len(usites)-1):
        spdata1 = ifile["species"][ifile["site"] == usites[i]]
        abdata1 = ifile["ab"][ifile["site"] == usites[i]]
        for a in range (i+1,len(usites)):
            spdata2 = ifile["species"][ifile["site"] == usites[a]]
            abdata2 = ifile["ab"][ifile["site"] == usites[a]]
            ed = comp_ed (spdata1,abdata1,spdata2,abdata2)
            # One output row per pair: site1, site2, distance.
            results = np.column_stack((usites[i], usites[a], ed))
            fout.writerows(results)
def combined_spID(*species_identifiers):
    """Return a single column unique species identifier

    Concatenates one or more pieces of a species identifier (e.g. genus and
    species columns) element-wise into a single identifier.

    Args:
        species_identifiers: A tuple containing one or pieces of a unique
            species identifier or lists of these pieces.

    Returns:
        A single unique species identifier or a list of single identifiers

    """
    input_type = type(species_identifiers[0])
    assert input_type in [list, tuple, str, pandas.core.series.Series, numpy.ndarray]
    if input_type is str:
        pieces = species_identifiers
    else:
        # pandas Series support element-wise string concatenation via '+'.
        pieces = [pandas.Series(piece) for piece in species_identifiers]
    combined = pieces[0]
    for piece in pieces[1:]:
        combined += piece
    # Convert back to the caller's container type.
    if input_type == numpy.ndarray:
        return numpy.array(combined)
    return input_type(combined)
def richness_in_group(composition_data, group_cols, spid_cols):
    """Determine the number of species in a grouping (e.g., at each site)

    Counts unique species per combination of grouping variables.  Grouping
    combinations that are absent from the data yield no rows; treating those
    as zeros is up to the caller.

    Args:
        composition_data: A Pandas data frame with one or more columns with
            information on species identity and one or more columns with
            information on the groups, e.g., years or sites.
        group_cols: A list of strings of the names othe columns in
            composition_data that hold the grouping fields.
        spid_cols: A list of strings of the names of the columns in
            composition_data that hold the data on species ID. This could be a
            single column with a unique ID or two columns containing the latin binomial.

    Returns:
        A data frame with the grouping fields and the species richness

    """
    # Collapse the ID pieces into a single temporary column, count distinct
    # IDs per group, then drop the temporary column again.
    spid_series = [composition_data[col] for col in spid_cols]
    composition_data['_spid'] = combined_spID(*spid_series)
    richness = composition_data.groupby(group_cols)._spid.nunique().reset_index()
    richness.columns = group_cols + ['richness']
    del composition_data['_spid']
    return richness
def abundance_in_group(composition_data, group_cols, abund_col=None):
    """Determine the number of individuals in a grouping (e.g., at each site)

    Sums (or counts) individuals per combination of grouping variables.
    Grouping combinations absent from the data yield no rows; treating those
    as zeros is up to the caller.

    Args:
        composition_data: A Pandas data frame with one or more columns with
            information on species identity and one or more columns with
            information on the groups, e.g., years or sites, and a column
            containing the abundance value.
        group_cols: A list of strings of the names othe columns in
            composition_data that hold the grouping fields.
        abund_col: A list holding the name of the abundance column. If not
            provided the abundance is obtained by counting the rows in each
            group (e.g., one sample per individual in many ecological datasets)

    Returns:
        A data frame with the grouping fields and the abundance

    """
    if abund_col:
        grouped = composition_data[group_cols + abund_col].groupby(group_cols).sum()
    else:
        grouped = composition_data[group_cols].groupby(group_cols).size()
    result = pandas.DataFrame(grouped)
    result.columns = ['abundance']
    return result.reset_index()
| mit |
anryko/ansible | test/units/modules/network/slxos/test_slxos_l2_interface.py | 38 | 5752 | #
# (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from units.compat.mock import patch
from units.modules.utils import set_module_args
from ansible.modules.network.slxos import slxos_l2_interface
from .slxos_module import TestSlxosModule, load_fixture
class TestSlxosL2InterfaceModule(TestSlxosModule):
    """Unit tests for the slxos_l2_interface Ansible module.

    The module's device-facing helpers (get_config, load_config,
    run_commands) are patched so no device connection is made; fixture
    files stand in for device output.
    """
    module = slxos_l2_interface

    def setUp(self):
        # Patch the module's device-facing helpers for the duration of each
        # test and route run_commands through the fixture loader.
        super(TestSlxosL2InterfaceModule, self).setUp()
        self._patch_get_config = patch(
            'ansible.modules.network.slxos.slxos_l2_interface.get_config'
        )
        self._patch_load_config = patch(
            'ansible.modules.network.slxos.slxos_l2_interface.load_config'
        )
        self._patch_run_commands = patch(
            'ansible.modules.network.slxos.slxos_l2_interface.run_commands'
        )
        self._get_config = self._patch_get_config.start()
        self._load_config = self._patch_load_config.start()
        self._run_commands = self._patch_run_commands.start()
        self._run_commands.side_effect = self.run_commands_load_fixtures

    def run_commands_load_fixtures(self, module, commands, *args, **kwargs):
        # Serve fixture file contents in place of live command output.
        return self.load_fixtures(
            commands,
            destination=self._run_commands,
            return_values=True
        )

    def tearDown(self):
        super(TestSlxosL2InterfaceModule, self).tearDown()
        self._patch_get_config.stop()
        self._patch_load_config.stop()
        self._patch_run_commands.stop()

    def load_fixtures(self, commands=None, destination=None, return_values=False):
        """Map each command to a fixture filename (spaces and slashes become
        underscores) and either return the contents or install them as the
        destination mock's side effects."""
        side_effects = []

        if not destination:
            destination = self._get_config

        if not commands:
            commands = ['slxos_config_config.cfg']

        for command in commands:
            filename = str(command).replace(' ', '_')
            filename = str(filename).replace('/', '_')
            side_effects.append(load_fixture(filename))

        if return_values is True:
            return side_effects

        destination.side_effect = side_effects
        return None

    def test_slxos_l2_interface_access_vlan(self, *args, **kwargs):
        # Configuring an existing VLAN as the access VLAN emits the
        # expected interface commands.
        set_module_args(dict(
            name='Ethernet 0/2',
            mode='access',
            access_vlan=200,
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface ethernet 0/2',
                    'switchport access vlan 200'
                ],
                'changed': True,
                'warnings': []
            }
        )

    def test_slxos_l2_interface_vlan_does_not_exist(self, *args, **kwargs):
        # Referencing a VLAN that is not on the switch fails with a
        # descriptive message.
        set_module_args(dict(
            name='Ethernet 0/2',
            mode='access',
            access_vlan=10,
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(
            result,
            {
                'msg': 'You are trying to configure a VLAN on an interface '
                       'that\ndoes not exist on the switch yet!',
                'failed': True,
                'vlan': '10'
            }
        )

    def test_slxos_l2_interface_incorrect_state(self, *args, **kwargs):
        # An interface that is not yet an L2 port is rejected.
        set_module_args(dict(
            name='Ethernet 0/3',
            mode='access',
            access_vlan=10,
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(
            result,
            {
                'msg': 'Ensure interface is configured to be a L2\nport first '
                       'before using this module. You can use\nthe slxos_'
                       'interface module for this.',
                'failed': True
            }
        )

    def test_slxos_l2_interface_trunk(self, *args, **kwargs):
        # Trunk mode emits allowed-vlan and native-vlan commands.
        set_module_args(dict(
            name='Ethernet 0/4',
            mode='trunk',
            native_vlan='22',
            trunk_allowed_vlans='200,22'
        ))
        result = self.execute_module(changed=True)
        self.assertEqual(
            result,
            {
                'commands': [
                    'interface ethernet 0/4',
                    'switchport trunk allowed vlan add 200,22',
                    'switchport trunk native vlan 22'
                ],
                'changed': True,
                'warnings': []
            }
        )

    def test_slxos_l2_interface_invalid_argument(self, *args, **kwargs):
        # Unknown module parameters are rejected by the argument spec.
        set_module_args(dict(
            name='Ethernet 0/2',
            mode='access',
            access_vlan=10,
            shawshank='Redemption'
        ))
        result = self.execute_module(failed=True)
        self.assertEqual(result['failed'], True)
        self.assertTrue(re.match(
            r'Unsupported parameters for \((basic.py|basic.pyc)\) module: '
            'shawshank Supported parameters include: access_vlan, aggregate, '
            'mode, name, native_vlan, state, trunk_allowed_vlans, trunk_vlans',
            result['msg']
        ))
| gpl-3.0 |
habibmasuro/bitcoin | contrib/spendfrom/spendfrom.py | 680 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    expected_satoshis = 2000000000000003
    satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
    if satoshis != expected_satoshis:
        raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
    """Return the default location of the bitcoin data directory"""
    system = platform.system()
    if system == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Bitcoin/")
    if system == "Windows":
        return os.path.join(os.environ['APPDATA'], "Bitcoin")
    # Linux and other POSIX systems.
    return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
    """Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
    # Python 2 module name; this script is Python 2 throughout (see
    # iteritems in main()).
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        """File wrapper that injects a dummy '[all]' section header,
        since bitcoin.conf has no sections, and strips '#' comments."""
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                # Emit the fake section header exactly once, first.
                try: return self.sechead
                finally: self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s
    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
    return dict(config_parser.items("all"))
def connect_JSON(config):
    """Connect to a bitcoin JSON-RPC server; exits the process on failure."""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        # Default RPC ports: 18332 for testnet, 8332 for mainnet.
        config['rpcport'] = 18332 if testnet else 8332
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the bitcoind we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)
def unlock_wallet(bitcoind):
    """Prompt for the wallet passphrase if needed; return True when unlocked."""
    info = bitcoind.getinfo()
    if 'unlocked_until' not in info:
        return True # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            # Unlock for 5 seconds -- just long enough for one signing call.
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            bitcoind.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")
    # Re-query: report whether the wallet is actually unlocked now.
    info = bitcoind.getinfo()
    return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
    """Summarize spendable funds per address.

    Returns a dict mapping address -> {"total": Decimal, "outputs": [unspent
    outputs], "account": account name}, built from listunspent(0) (i.e.
    including unconfirmed outputs).
    """
    address_summary = dict()
    address_to_account = dict()
    for info in bitcoind.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]
    unspent = bitcoind.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = bitcoind.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]
        # This code only deals with ordinary pay-to-bitcoin-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue
        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary
def select_coins(needed, inputs):
    """Greedily pick unspent inputs until their total covers ``needed``.

    Returns (outputs, change): outputs is a list of {"txid", "vout"} dicts,
    change is the amount gathered beyond ``needed`` (negative when the
    available inputs are insufficient).
    """
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    gathered = Decimal("0.0")
    for coin in inputs:
        if gathered >= needed:
            break
        outputs.append({"txid": coin["txid"], "vout": coin["vout"]})
        gathered += coin["amount"]
    return (outputs, gathered - needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
    """Build and sign a raw transaction; return its hex data.

    Spends coins received on ``fromaddresses``, pays ``amount`` to
    ``toaddress``, and sends any change back to the last from-address.
    Exits the process if funds are insufficient or signing fails.
    """
    all_coins = list_available(bitcoind)
    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]
    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)
    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to bitcoind.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE: # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)
    rawtx = bitcoind.createrawtransaction(inputs, outputs)
    signed_rawtx = bitcoind.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]
    return txdata
def compute_amount_in(bitcoind, txinfo):
    """Sum the values of the previous outputs spent by txinfo's inputs.

    Requires one getrawtransaction RPC lookup per input.
    """
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = bitcoind.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result
def compute_amount_out(txinfo):
    """Sum the values of all outputs in a decoded transaction."""
    total = Decimal("0.0")
    for vout in txinfo['vout']:
        total += vout['value']
    return total
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
    """Abort (sys.exit) unless the transaction's implied fee looks sane.

    The fee is the difference between the sum of the inputs and the sum of
    the outputs of the decoded transaction.
    """
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = bitcoind.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(bitcoind, txinfo)
        total_out = compute_amount_out(txinfo)
        # The implied fee.  The original code referenced an undefined name
        # ``fee`` in the size/tiny-amount checks below, so those checks
        # raised NameError whenever they were reached.
        fee = total_in - total_out
        if fee > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(fee))
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction
    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)
def main():
    """Command-line entry point.
    With --amount: unlock the wallet, build and sign a raw transaction
    spending from the --from addresses, sanity-check its implied fee,
    then either print it (--dry_run) or broadcast it.
    Without --amount: list each address's spendable balance.
    """
    import optparse
    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get bitcoins from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to get send bitcoins to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of bitcoin.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")
    (options, args) = parser.parse_args()
    # Abort early if bitcoind's JSON layer cannot represent 8-decimal amounts.
    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    bitcoind = connect_JSON(config)
    if options.amount is None:
        # No amount given: report what each address could spend.
        address_summary = list_available(bitcoind)
        # NOTE: dict.iteritems() -- this script targets Python 2.
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(bitcoind) == False:
            pass # Keep asking for passphrase until they get it right
        txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
        # Refuse to broadcast if the implied fee exceeds 1% of the amount sent.
        sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = bitcoind.sendrawtransaction(txdata)
            print(txid)
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| mit |
2014c2g8/c2g8 | w2/static/Brython2.0.0-20140209-164925/Lib/tempfile.py | 728 | 22357 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
# Where fcntl is available, mark temp-file descriptors close-on-exec so
# they are not inherited across exec(); elsewhere this is a no-op.
try:
    import fcntl as _fcntl
except ImportError:
    def _set_cloexec(fd):
        # fcntl unavailable (e.g. Windows): nothing to do.
        pass
else:
    def _set_cloexec(fd):
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except OSError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
    import _thread
except ImportError:
    import _dummy_thread as _thread
# Lock factory used to guard the one-time lazy initializations below.
_allocate_lock = _thread.allocate_lock
# os.open flag sets: create exclusively, fail if the name already exists.
# The O_NOINHERIT/O_NOFOLLOW/O_BINARY bits are added only where the
# platform defines them.
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
    _text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
    _text_openflags |= _os.O_NOFOLLOW
_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    _bin_openflags |= _os.O_BINARY
# Maximum number of candidate names tried before giving up.
if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000
# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"
# Internal routines.
_once_lock = _allocate_lock()
# Pick the cheapest "does this path exist" primitive available; lstat is
# preferred so symlinks are not followed.
if hasattr(_os, "lstat"):
    _stat = _os.lstat
elif hasattr(_os, "stat"):
    _stat = _os.stat
else:
    # Fallback. All we need is something that raises OSError if the
    # file doesn't exist.
    def _stat(fn):
        f = open(fn)
        f.close()
def _exists(fn):
    """Return True if *fn* names an existing filesystem entry."""
    try:
        _stat(fn)
        return True
    except OSError:
        return False
class _RandomNameSequence:
    """An endless iterator of unpredictable six-character strings that
    can safely be embedded in file names.  A single instance may be
    shared by multiple threads."""

    characters = "abcdefghijklmnopqrstuvwxyz0123456789_"

    @property
    def rng(self):
        # Re-seed after fork: every process gets its own Random instance,
        # keyed on the current pid.
        pid = _os.getpid()
        if getattr(self, '_rng_pid', None) != pid:
            self._rng = _Random()
            self._rng_pid = pid
        return self._rng

    def __iter__(self):
        return self

    def __next__(self):
        pick = self.rng.choice
        alphabet = self.characters
        return ''.join([pick(alphabet) for _ in range(6)])
def _candidate_tempdir_list():
    """Generate a list of candidate temporary directories which
    _get_default_tempdir will try."""
    # First, try the environment (in this priority order).
    candidates = [d for d in map(_os.getenv, ('TMPDIR', 'TEMP', 'TMP')) if d]
    # Failing that, try OS-specific locations.
    if _os.name == 'nt':
        candidates += [r'c:\temp', r'c:\tmp', r'\temp', r'\tmp']
    else:
        candidates += ['/tmp', '/var/tmp', '/usr/tmp']
    # As a last resort, the current directory.
    try:
        candidates.append(_os.getcwd())
    except (AttributeError, OSError):
        candidates.append(_os.curdir)
    return candidates
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.
    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory. If this
    is successful, the test file is deleted. To prevent denial of
    service, the name of the test file must be randomized."""
    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()
    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # O_EXCL open: fails with FileExistsError if the name is taken.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        # closefd=False: the raw fd is closed explicitly below.
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                pass
            except OSError:
                break # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
# Shared, lazily created name generator (see _get_candidate_names).
_name_sequence = None
def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""
    # Double-checked locking: only one thread ever constructs the
    # shared _RandomNameSequence.
    global _name_sequence
    if _name_sequence is None:
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile."""
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            # 0o600 plus O_EXCL in *flags*: private file, created atomically.
            fd = _os.open(file, flags, 0o600)
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except FileExistsError:
            continue # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if _os.name == 'nt':
                continue
            else:
                raise
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
# User visible interfaces.
def gettempprefix():
    """Accessor for tempdir.template."""
    return template
# Module-level cache for the default temporary directory (lazy).
tempdir = None
def gettempdir():
    """Accessor for tempfile.tempdir."""
    # Double-checked locking: resolve the default directory exactly once.
    global tempdir
    if tempdir is None:
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """User-callable function to create and return a unique temporary
    file. The return value is a pair (fd, name) where fd is the
    file descriptor returned by os.open, and name is the filename.
    If 'suffix' is specified, the file name will end with that suffix,
    otherwise there will be no suffix.
    If 'prefix' is specified, the file name will begin with that prefix,
    otherwise a default prefix is used.
    If 'dir' is specified, the file will be created in that directory,
    otherwise a default directory is used.
    If 'text' is specified and true, the file is opened in text
    mode. Else (the default) the file is opened in binary mode. On
    some operating systems, this makes no difference.
    The file is readable and writable only by the creating user ID.
    If the operating system uses permission bits to indicate whether a
    file is executable, the file is executable by no one. The file
    descriptor is not inherited by children of this process.
    Caller is responsible for deleting the file when done with it.
    """
    if dir is None:
        dir = gettempdir()
    # Text vs. binary differs only where the OS distinguishes them
    # (e.g. O_BINARY on Windows); both flag sets include O_EXCL.
    if text:
        flags = _text_openflags
    else:
        flags = _bin_openflags
    return _mkstemp_inner(dir, prefix, suffix, flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """User-callable function to create and return a unique temporary
    directory. The return value is the pathname of the directory.
    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.
    The directory is readable, writable, and searchable only by the
    creating user.
    Caller is responsible for deleting the directory when done with it.
    """
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        try:
            # 0o700: owner-only access; mkdir fails if the name exists.
            _os.mkdir(file, 0o700)
            return file
        except FileExistsError:
            continue # try again
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """User-callable function to return a unique temporary file name. The
    file is not created.
    Arguments are as for mkstemp, except that the 'text' argument is
    not accepted.
    This function is unsafe and should not be used. The file name
    refers to a file that did not exist at some point, but by the time
    you get around to creating it, someone else may have beaten you to
    the punch.
    """
##    from warnings import warn as _warn
##    _warn("mktemp is a potential security risk to your program",
##          RuntimeWarning, stacklevel=2)
    if dir is None:
        dir = gettempdir()
    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, prefix + name + suffix)
        # Race window: the name may be claimed between this check and any
        # later use by the caller -- hence the warning above.
        if not _exists(file):
            return file
    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper
    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """
    def __init__(self, file, name, delete=True):
        self.file = file          # underlying file object
        self.name = name          # absolute path of the temp file
        self.close_called = False # guards against double-unlink
        self.delete = delete      # unlink on close?
    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if not isinstance(a, int):
            setattr(self, name, a)
        return a
    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self
    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        return iter(self.file)
    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out. Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink
        def close(self):
            # Close the file and, if requested, unlink it exactly once.
            if not self.close_called:
                self.close_called = True
                self.file.close()
                if self.delete:
                    self.unlink(self.name)
        def __del__(self):
            self.close()
        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
    else:
        def __exit__(self, exc, value, tb):
            # Windows: O_TEMPORARY handles deletion; just close the file.
            self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix="", prefix=template,
                       dir=None, delete=True):
    """Create and return a temporary file.
    Arguments:
    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode' -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding' -- the encoding argument to io.open (default None)
    'newline' -- the newline argument to io.open (default None)
    'delete' -- whether the file is deleted on close (default True).
    The file is created as mkstemp() would do it.
    Returns an object with a file-like interface; the name of the file
    is accessible as file.name. The file will be automatically deleted
    when it is closed unless the 'delete' argument is set to False.
    """
    if dir is None:
        dir = gettempdir()
    flags = _bin_openflags
    # Setting O_TEMPORARY in the flags causes the OS to delete
    # the file when it is closed. This is only supported by Windows.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY
    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    file = _io.open(fd, mode, buffering=buffering,
                    newline=newline, encoding=encoding)
    # The wrapper handles deletion-on-close on POSIX platforms.
    return _TemporaryFileWrapper(file, name, delete)
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix="", prefix=template,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.
        Returns an object with a file-like interface. The file has no
        name, and will cease to exist when it is closed.
        """
        if dir is None:
            dir = gettempdir()
        flags = _bin_openflags
        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: the open fd keeps the anonymous file
            # alive until it is closed.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False
    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix="", prefix=template, dir=None):
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" avoids newline translation;
            # this is important because otherwise on Windows we'd
            # get double newline translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Saved so rollover() can build an equivalent real TemporaryFile.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}
    def _check(self, file):
        # Roll over to a real file once the in-memory spool exceeds max_size.
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()
    def rollover(self):
        # Replace the in-memory buffer with a real temporary file,
        # preserving contents and the current position.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs
        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)
        self._rolled = True
    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # BytesIO/StringIO instance to a real file. So we list
    # all the methods directly.
    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self
    def __exit__(self, exc, value, tb):
        self._file.close()
    # file protocol
    def __iter__(self):
        return self._file.__iter__()
    def close(self):
        self._file.close()
    @property
    def closed(self):
        return self._file.closed
    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']
    def fileno(self):
        # A real OS-level descriptor is required, so force rollover.
        self.rollover()
        return self._file.fileno()
    def flush(self):
        self._file.flush()
    def isatty(self):
        return self._file.isatty()
    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            return self._TemporaryFileArgs['mode']
    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            # In-memory buffers have no name until rolled over.
            return None
    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']
    def read(self, *args):
        return self._file.read(*args)
    def readline(self, *args):
        return self._file.readline(*args)
    def readlines(self, *args):
        return self._file.readlines(*args)
    def seek(self, *args):
        self._file.seek(*args)
    @property
    def softspace(self):
        return self._file.softspace
    def tell(self):
        return self._file.tell()
    def truncate(self, size=None):
        if size is None:
            self._file.truncate()
        else:
            if size > self._max_size:
                self.rollover()
            self._file.truncate(size)
    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv
    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory. This has the same
    behavior as mkdtemp but can be used as a context manager. For
    example:
        with TemporaryDirectory() as tmpdir:
            ...
    Upon exiting the context, the directory and everything contained
    in it are removed.
    """
    def __init__(self, suffix="", prefix=template, dir=None):
        self._closed = False
        self.name = None # Handle mkdtemp raising an exception
        self.name = mkdtemp(suffix, prefix, dir)
    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)
    def __enter__(self):
        return self.name
    def cleanup(self, _warn=False):
        # Remove the tree once; tolerate the partial module teardown that
        # happens at interpreter shutdown (see issue #10188).
        if self.name and not self._closed:
            try:
                self._rmtree(self.name)
            except (TypeError, AttributeError) as ex:
                # Issue #10188: Emit a warning on stderr
                # if the directory could not be cleaned
                # up due to missing globals
                if "None" not in str(ex):
                    raise
                print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
                      file=_sys.stderr)
                return
            self._closed = True
            if _warn:
                self._warn("Implicitly cleaning up {!r}".format(self),
                           ResourceWarning)
    def __exit__(self, exc, value, tb):
        self.cleanup()
    def __del__(self):
        # Issue a ResourceWarning if implicit cleanup needed
        self.cleanup(_warn=True)
    # XXX (ncoghlan): The following code attempts to make
    # this class tolerant of the module nulling out process
    # that happens during CPython interpreter shutdown
    # Alas, it doesn't actually manage it. See issue #10188
    _listdir = staticmethod(_os.listdir)
    _path_join = staticmethod(_os.path.join)
    _isdir = staticmethod(_os.path.isdir)
    _islink = staticmethod(_os.path.islink)
    _remove = staticmethod(_os.remove)
    _rmdir = staticmethod(_os.rmdir)
    _os_error = OSError
    _warn = _warnings.warn
    def _rmtree(self, path):
        # Essentially a stripped down version of shutil.rmtree. We can't
        # use globals because they may be None'ed out at shutdown.
        for name in self._listdir(path):
            fullname = self._path_join(path, name)
            try:
                isdir = self._isdir(fullname) and not self._islink(fullname)
            except self._os_error:
                isdir = False
            if isdir:
                self._rmtree(fullname)
            else:
                try:
                    self._remove(fullname)
                except self._os_error:
                    pass
        try:
            self._rmdir(path)
        except self._os_error:
            pass
| gpl-2.0 |
Stefan-Schmidt/linux-2.6 | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# NOTE: this script is Python 2 (print statements, long(), 0L literal).
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# READELF env var lets the caller substitute a cross-toolchain readelf.
readelf = os.getenv("READELF", "readelf")
# Matches "<name>: [0xSTART-0xEND]" header lines from `readelf -u` output.
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
    # The unwind region lengths must account for every instruction slot
    # in the function (3 slots per 16-byte ia64 bundle, computed below).
    if slots != rlen_sum:
        global num_errors
        num_errors += 1
        if not func: func = "[%#x-%#x]" % (start, end)
        print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
    return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # New function header: validate the previous function first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Don't forget the last function in the file.
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
mikeckennedy/suds_python | suds/xsd/sxbuiltin.py | 3 | 7240 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbuiltin} module provides classes that represent
XSD I{builtin} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.sax.date import *
from suds.xsd.sxbase import XBuiltin
import datetime as dt
log = getLogger(__name__)
class XString(XBuiltin):
    """
    Represents an (xsd) <xs:string/> node.
    No conversion override is defined; behavior is inherited from XBuiltin.
    """
    pass
class XAny(XBuiltin):
    """
    Represents an (xsd) <any/> node
    """
    def __init__(self, schema, name):
        # NOTE(review): nillable is forced off for <any/> nodes.
        XBuiltin.__init__(self, schema, name)
        self.nillable = False
    def get_child(self, name):
        # Children of an <any/> node are themselves untyped XAny nodes.
        child = XAny(self.schema, name)
        return (child, [])
    def any(self):
        # Identifies this node as an xsd <any/>.
        return True
class XBoolean(XBuiltin):
    """
    Represents an (xsd) boolean builtin type.
    """
    # Lookup tables: xml text -> python bool, and python bool/int -> xml text.
    # NOTE: True/1 and False/0 hash equal in Python, so each pair collapses
    # to a single dict entry; lookups still work for both spellings.
    translation = (
        { '1':True,'true':True,'0':False,'false':False },
        { True:'true',1:'true',False:'false',0:'false' },
    )
    def translate(self, value, topython=True):
        """Convert *value* between its XML text form and a python bool.
        Unrecognized xml text yields None; python values that are not
        bool/int are passed through unchanged.
        """
        if topython:
            if isinstance(value, str):
                return XBoolean.translation[0].get(value)
            else:
                return None
        else:
            if isinstance(value, (bool,int)):
                return XBoolean.translation[1].get(value)
            else:
                return value
class XInteger(XBuiltin):
    """
    Represents an (xsd) xs:int builtin type.
    """
    def translate(self, value, topython=True):
        """Convert between the XML text form and a python int."""
        if topython:
            # Only non-empty strings can be parsed; anything else maps to None.
            if isinstance(value, str) and len(value):
                return int(value)
            return None
        # Marshalling: ints are stringified, other values pass through.
        if isinstance(value, int):
            return str(value)
        return value
class XLong(XBuiltin):
    """
    Represents an (xsd) xs:long builtin type.
    """
    def translate(self, value, topython=True):
        """Convert between the XML text form and a python int (xs:long)."""
        if not topython:
            # Marshalling: ints are stringified, other values pass through.
            return str(value) if isinstance(value, int) else value
        # Only non-empty strings can be parsed; anything else maps to None.
        if isinstance(value, str) and len(value):
            return int(value)
        return None
class XFloat(XBuiltin):
    """
    Represents an (xsd) xs:float builtin type.
    """
    def translate(self, value, topython=True):
        """Convert between the XML text form and a python float."""
        if topython:
            # Only non-empty strings are parsed; anything else maps to None.
            return float(value) if (isinstance(value, str) and len(value)) else None
        # Marshalling: floats are stringified, other values pass through.
        return str(value) if isinstance(value, float) else value
class XDate(XBuiltin):
    """
    Represents an (xsd) xs:date builtin type.
    """
    def translate(self, value, topython=True):
        """Convert between the XML text form and a python datetime.date,
        using the suds.sax.date.Date adapter for parsing/formatting."""
        if topython:
            if isinstance(value, str) and len(value):
                return Date(value).date
            else:
                return None
        else:
            if isinstance(value, dt.date):
                return str(Date(value))
            else:
                return value
class XTime(XBuiltin):
    """
    Represents an (xsd) xs:time builtin type.
    """
    def translate(self, value, topython=True):
        """Convert between the XML text form and a python time value,
        using the suds.sax.date.Time adapter for parsing/formatting."""
        if topython:
            if isinstance(value, str) and len(value):
                return Time(value).time
            else:
                return None
        else:
            if isinstance(value, dt.date):
                return str(Time(value))
            else:
                return value
class XDateTime(XBuiltin):
    """
    Represents an (xsd) xs:datetime builtin type.
    """
    def translate(self, value, topython=True):
        """Convert between the XML text form and a python datetime,
        using the suds.sax.date.DateTime adapter for parsing/formatting."""
        if topython:
            if isinstance(value, str) and len(value):
                return DateTime(value).datetime
            else:
                return None
        else:
            if isinstance(value, dt.date):
                return str(DateTime(value))
            else:
                return value
class Factory:
    """Factory for builtin XSD type objects.
    Maps builtin xsd tag names to the X* classes above; unknown names
    fall back to a plain XBuiltin (see create()).
    """
    tags =\
    {
        # any
        'anyType' : XAny,
        # strings
        'string' : XString,
        'normalizedString' : XString,
        'ID' : XString,
        'Name' : XString,
        'QName' : XString,
        'NCName' : XString,
        'anySimpleType' : XString,
        'anyURI' : XString,
        'NOTATION' : XString,
        'token' : XString,
        'language' : XString,
        'IDREFS' : XString,
        'ENTITIES' : XString,
        'IDREF' : XString,
        'ENTITY' : XString,
        'NMTOKEN' : XString,
        'NMTOKENS' : XString,
        # binary
        'hexBinary' : XString,
        'base64Binary' : XString,
        # integers
        'int' : XInteger,
        'integer' : XInteger,
        'unsignedInt' : XInteger,
        'positiveInteger' : XInteger,
        'negativeInteger' : XInteger,
        'nonPositiveInteger' : XInteger,
        'nonNegativeInteger' : XInteger,
        # longs
        'long' : XLong,
        'unsignedLong' : XLong,
        # shorts
        'short' : XInteger,
        'unsignedShort' : XInteger,
        'byte' : XInteger,
        'unsignedByte' : XInteger,
        # floats
        'float' : XFloat,
        'double' : XFloat,
        'decimal' : XFloat,
        # dates & times
        'date' : XDate,
        'time' : XTime,
        'dateTime': XDateTime,
        'duration': XString,
        'gYearMonth' : XString,
        'gYear' : XString,
        'gMonthDay' : XString,
        'gDay' : XString,
        'gMonth' : XString,
        # boolean
        'boolean' : XBoolean,
    }
    @classmethod
    def maptag(cls, tag, fn):
        """
        Map (override) tag => I{class} mapping.
        @param tag: An xsd tag name.
        @type tag: str
        @param fn: A function or class.
        @type fn: fn|class.
        """
        cls.tags[tag] = fn
    @classmethod
    def create(cls, schema, name):
        """
        Create an object based on the root tag name.
        @param schema: A schema object.
        @type schema: L{schema.Schema}
        @param name: The name.
        @type name: str
        @return: The created object.
        @rtype: L{XBuiltin}
        """
        # Unmapped tag names get the generic XBuiltin behavior.
        fn = cls.tags.get(name)
        if fn is not None:
            return fn(schema, name)
        else:
            return XBuiltin(schema, name)
| lgpl-3.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/numpy/lib/npyio.py | 23 | 73862 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode, is_pathlib_path
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
    """
    BagObj(obj)
    Convert attribute look-ups to getitems on the object passed in.
    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.
    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo(object):
    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
    ...                                 # will call this method when any
    ...                                 # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"
    """
    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)
    def __getattribute__(self, key):
        # Delegate every attribute access to item lookup on the wrapped
        # object; missing keys are re-raised as AttributeError so that
        # getattr()/hasattr() semantics are preserved.
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key)
    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.
        This also enables tab-completion in an interpreter or IPython.
        """
        return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
    constructor.
    """
    # Deferred import: zipfile pulls in optional compression modules.
    import zipfile
    if is_pathlib_path(file):
        file = str(file)
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
    def __del__(self):
        # Best-effort cleanup so an abandoned NpzFile does not leak the
        # zip archive or an owned file descriptor.
        self.close()
    def __getitem__(self, key):
        """Return the member stored under `key`.

        `key` may be given with or without its ``.npy`` suffix.  Members
        whose contents start with the ``.npy`` magic prefix are parsed into
        arrays; anything else is returned as raw bytes.  Raises KeyError
        when `key` names no member of the archive.
        """
        # FIXME: This seems like it will copy strings around
        # more than is strictly necessary. The zipfile
        # will read the string and then
        # the format.read_array will copy the string
        # to another place in memory.
        # It would be better if the zipfile could read
        # (or at least uncompress) the data
        # directly into the array memory.
        member = 0
        if key in self._files:
            member = 1
        elif key in self.files:
            # Suffix-stripped name was given; restore '.npy' for the zip.
            member = 1
            key += '.npy'
        if member:
            # NOTE: local name `bytes` shadows the builtin within this scope.
            bytes = self.zip.open(key)
            # Sniff the magic prefix to decide between .npy and raw data.
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                # Re-open so format.read_array sees the stream from offset 0.
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs)
            else:
                # Not NumPy's binary format; hand back the raw contents.
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)
    def __iter__(self):
        # Iterate over the member names (with '.npy' suffixes stripped).
        return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
    def keys(self):
        """Return the member names, with any ``.npy`` suffix stripped."""
        return self.files
    def iterkeys(self):
        """Return an iterator over the files in the archive."""
        # Same as iter(self); kept for dict-like API parity on Python 2.
        return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
         encoding='ASCII'):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes). A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail.
        Default: True
    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files on Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'
    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.
    Raises
    ------
    IOError
        If the input file does not exist or cannot be read.
    ValueError
        The file contains an object array, but allow_pickle=False given.
    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::
        with load('foo.npz') as data:
            a = data['a']
      The underlying file descriptor is closed when exiting the 'with'
      block.
    Examples
    --------
    Store data to disk, and load it again:
    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])
    Store compressed data to disk, and load it again:
    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()
    Mem-map the stored array, and then access the second row
    directly from disk:
    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])
    """
    # own_fid: whether this function opened `fid` and must close it.
    own_fid = False
    if isinstance(file, basestring):
        fid = open(file, "rb")
        own_fid = True
    elif is_pathlib_path(file):
        fid = file.open("rb")
        own_fid = True
    else:
        # Caller supplied an open file-like object; they retain ownership.
        fid = file
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # result can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
    if sys.version_info[0] >= 3:
        pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
    else:
        # Nothing to do on Python 2
        pickle_kwargs = {}
    try:
        # Code to distinguish from NumPy binary files and pickles.
        # Sniff the first few bytes: zip archive -> .npz, npy magic -> .npy,
        # anything else is assumed to be a pickle.
        _ZIP_PREFIX = asbytes('PK\x03\x04')
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX):
            # zip-file (assume .npz)
            # Transfer file ownership to NpzFile, so the `finally` clause
            # below must not close it.
            tmp = own_fid
            own_fid = False
            return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                # Memory mapping needs the original path/handle, not `fid`.
                return format.open_memmap(file, mode=mmap_mode)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("allow_pickle=False, but file does not contain "
                                 "non-pickled data")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except:
                raise IOError(
                    "Failed to interpret file %s as a pickle" % repr(file))
    finally:
        if own_fid:
            fid.close()
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.
    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved.  If file is a
        file-object, the filename is unchanged.  If file is a string or
        Path, a ``.npy`` extension is appended when missing.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles.  Pickled data can
        execute arbitrary code on load and may not be portable between
        Python 2 and Python 3.  Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way.
    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load
    Notes
    -----
    For a description of the ``.npy`` format, see the module docstring
    of `numpy.lib.format` or the NumPy Enhancement Proposal
    http://docs.scipy.org/doc/numpy/neps/npy-format.html
    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> np.save(outfile, x)
    >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    # Assume we open (and must close) the handle unless the caller
    # passed in an already-open file object.
    own_fid = True
    if isinstance(file, basestring):
        if not file.endswith('.npy'):
            file = file + '.npy'
        fid = open(file, "wb")
    elif is_pathlib_path(file):
        if not file.name.endswith('.npy'):
            file = file.parent / (file.name + '.npy')
        fid = file.open("wb")
    else:
        fid = file
        own_fid = False
    # fix_imports only has meaning for pickling under Python 3.
    pickle_kwargs = (dict(fix_imports=fix_imports)
                     if sys.version_info[0] >= 3 else None)
    try:
        format.write_array(fid, np.asanyarray(arr),
                           allow_pickle=allow_pickle,
                           pickle_kwargs=pickle_kwargs)
    finally:
        if own_fid:
            fid.close()
def savez(file, *args, **kwds):
    """
    Save several arrays into a single file in uncompressed ``.npz`` format.
    Positional arrays are stored under the generated names 'arr_0',
    'arr_1', ...; keyword arrays are stored under their keyword names.
    Parameters
    ----------
    file : str or file
        Either the file name (string) or an open file (file-like object)
        where the data will be saved.  For a string or Path, a ``.npz``
        extension is appended when missing.
    args : Arguments, optional
        Arrays to save to the file under automatically generated names
        ("arr_0", "arr_1", and so on).
    kwds : Keyword arguments, optional
        Arrays to save to the file under the keyword names.
    Returns
    -------
    None
    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive
    Notes
    -----
    The ``.npz`` file format is an uncompressed zip archive containing one
    ``.npy`` file per array, named after the variable it holds.  Opening
    the result with `load` yields a dictionary-like `NpzFile` whose member
    names are listed by its ``.files`` attribute.
    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['y', 'x']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    # Delegate to the shared writer with compression disabled.
    _savez(file, args, kwds, compress=False)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.
    Positional arrays are stored under the generated names 'arr_0',
    'arr_1', ...; keyword arrays are stored under their keyword names.
    Parameters
    ----------
    file : str
        File name of ``.npz`` file.
    args : Arguments
        Function arguments.
    kwds : Keyword arguments
        Keywords.
    See Also
    --------
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.
    """
    # Same machinery as `savez`, but with zlib (ZIP_DEFLATED) compression.
    _savez(file, args, kwds, compress=True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    """Implementation shared by `savez` and `savez_compressed`.

    Arrays in `args` are stored as 'arr_0', 'arr_1', ...; arrays in `kwds`
    under their keyword names.  Each array is serialized to a temporary
    ``.npy`` file which is then appended to the target zip archive.
    Raises ValueError when a positional name collides with a keyword.
    """
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile
    # Import deferred for startup time improvement
    import tempfile
    if isinstance(file, basestring):
        if not file.endswith('.npz'):
            file = file + '.npz'
    elif is_pathlib_path(file):
        if not file.name.endswith('.npz'):
            file = file.parent / (file.name + '.npz')
    # Merge positional arrays into the keyword dict under generated names.
    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val
    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED
    zipf = zipfile_factory(file, mode="w", compression=compression)
    # Stage arrays in a temporary file on disk, before writing to zip.
    # Since target file might be big enough to exceed capacity of a global
    # temporary directory, create temp file side-by-side with the target file.
    file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
    fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
    os.close(fd)
    try:
        for key, val in namedict.items():
            fname = key + '.npy'
            # The temp file is reused (truncated) for every array.
            fid = open(tmpfile, 'wb')
            try:
                format.write_array(fid, np.asanyarray(val),
                                   allow_pickle=allow_pickle,
                                   pickle_kwargs=pickle_kwargs)
                # Close before zipf.write so the data is flushed to disk;
                # fid = None tells the finally clause not to close twice.
                fid.close()
                fid = None
                zipf.write(tmpfile, arcname=fname)
            except IOError as exc:
                raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
            finally:
                if fid:
                    fid.close()
    finally:
        os.remove(tmpfile)
    zipf.close()
def _getconv(dtype):
    """Find the correct dtype converter. Adapted from matplotlib.

    Returns a callable that maps a single byte-string token read from a
    text file to a scalar of the kind described by `dtype`.
    """
    def floatconv(x):
        # Check a lowercased copy so both '0x...' and '0X...' hex float
        # literals are routed through float.fromhex.  (Previously the
        # result of x.lower() was discarded, so uppercase hex prefixes
        # fell through to float() and raised ValueError.)
        if b'0x' in x.lower():
            return float.fromhex(asstr(x))
        return float(x)
    typ = dtype.type
    if issubclass(typ, np.bool_):
        return lambda x: bool(int(x))
    if issubclass(typ, np.uint64):
        return np.uint64
    if issubclass(typ, np.int64):
        return np.int64
    if issubclass(typ, np.integer):
        # Go through float so tokens like '1.0' still parse as integers.
        return lambda x: int(float(x))
    elif issubclass(typ, np.longdouble):
        return np.longdouble
    elif issubclass(typ, np.floating):
        return floatconv
    elif issubclass(typ, complex):
        # `complex` is the value of the deprecated alias `np.complex`.
        return lambda x: complex(asstr(x))
    elif issubclass(typ, np.bytes_):
        return bytes
    else:
        return str
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0):
    """
    Load data from a text file.
    Each row in the text file must have the same number of values.
    Parameters
    ----------
    fname : file, str, or pathlib.Path
        File, filename, or generator to read. If the filename extension is
        ``.gz`` or ``.bz2``, the file is first decompressed. Note that
        generators should return byte strings for Python 3k.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence, optional
        The characters or list of characters used to indicate the start of a
        comment;
        default: '#'.
    delimiter : str, optional
        The string used to separate values. By default, this is any
        whitespace.
    converters : dict, optional
        A dictionary mapping column number to a function that will convert
        that column to a float. E.g., if column 0 is a date string:
        ``converters = {0: datestr2num}``. Converters can also be used to
        provide a default value for missing data (but see also `genfromtxt`):
        ``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
    skiprows : int, optional
        Skip the first `skiprows` lines; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        usecols = (1,4,5) will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.
        .. versionadded:: 1.11.0
        Also when a single column has to be read it is possible to use
        an integer instead of a tuple. E.g ``usecols = 3`` reads the
        fourth column the same way as `usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
        data-type, arrays are returned for each field. Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.
        .. versionadded:: 1.6.0
    Returns
    -------
    out : ndarray
        Data read from the text file.
    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files
    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.
    .. versionadded:: 1.10.0
    The strings produced by the Python float.hex method can be used as
    input for floats.
    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\\n2 3")
    >>> np.loadtxt(c)
    array([[ 0., 1.],
           [ 2., 3.]])
    >>> d = StringIO("M 21 72\\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([('M', 21, 72.0), ('F', 35, 58.0)],
          dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
    >>> c = StringIO("1,0,2\\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([ 1., 3.])
    >>> y
    array([ 2., 4.])
    """
    # Type conversions for Py3 convenience
    if comments is not None:
        if isinstance(comments, (basestring, bytes)):
            comments = [asbytes(comments)]
        else:
            comments = [asbytes(comment) for comment in comments]
        # Compile regex for comments beforehand
        # (an alternation over all comment markers, escaped literally).
        comments = (re.escape(comment) for comment in comments)
        regex_comments = re.compile(asbytes('|').join(comments))
    # Keep the caller's converters separate; `converters` is rebuilt below.
    user_converters = converters
    if delimiter is not None:
        delimiter = asbytes(delimiter)
    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints
        try:
            usecols_as_list = list(usecols)
        except TypeError:
            usecols_as_list = [usecols]
        for col_idx in usecols_as_list:
            try:
                opindex(col_idx)
            except TypeError as e:
                e.args = (
                    "usecols must be an int or a sequence of ints but "
                    "it contains at least one element of type %s" %
                    type(col_idx),
                    )
                raise
        # Fall back to existing code
        usecols = usecols_as_list
    # fown: whether this function opened `fh` and must close it.
    fown = False
    try:
        if is_pathlib_path(fname):
            fname = str(fname)
        if _is_string_like(fname):
            fown = True
            if fname.endswith('.gz'):
                import gzip
                fh = iter(gzip.GzipFile(fname))
            elif fname.endswith('.bz2'):
                import bz2
                fh = iter(bz2.BZ2File(fname))
            elif sys.version_info[0] == 2:
                fh = iter(open(fname, 'U'))
            else:
                fh = iter(open(fname))
        else:
            fh = iter(fname)
    except TypeError:
        raise ValueError('fname must be a string, file handle, or generator')
    X = []
    def flatten_dtype(dt):
        """Unpack a structured data-type, and produce re-packing info."""
        if dt.names is None:
            # If the dtype is flattened, return.
            # If the dtype has a shape, the dtype occurs
            # in the list more than once.
            shape = dt.shape
            if len(shape) == 0:
                return ([dt.base], None)
            else:
                packing = [(shape[-1], list)]
                if len(shape) > 1:
                    for dim in dt.shape[-2::-1]:
                        # Fold each outer subarray dimension into a single
                        # (count, nested-packing) level, innermost first.
                        packing = [(dim*packing[0][0], packing*dim)]
                return ([dt.base] * int(np.prod(dt.shape)), packing)
        else:
            types = []
            packing = []
            for field in dt.names:
                tp, bytes = dt.fields[field]
                flat_dt, flat_packing = flatten_dtype(tp)
                types.extend(flat_dt)
                # Avoid extra nesting for subarrays
                if len(tp.shape) > 0:
                    packing.extend(flat_packing)
                else:
                    packing.append((len(flat_dt), flat_packing))
            return (types, packing)
    def pack_items(items, packing):
        """Pack items into nested lists based on re-packing info."""
        if packing is None:
            return items[0]
        elif packing is tuple:
            return tuple(items)
        elif packing is list:
            return list(items)
        else:
            # packing is a list of (length, sub-packing) pairs.
            start = 0
            ret = []
            for length, subpacking in packing:
                ret.append(pack_items(items[start:start+length], subpacking))
                start += length
            return tuple(ret)
    def split_line(line):
        """Chop off comments, strip, and split at delimiter.
        Note that although the file is opened as text, this function
        returns bytes.
        """
        line = asbytes(line)
        if comments is not None:
            line = regex_comments.split(asbytes(line), maxsplit=1)[0]
        line = line.strip(asbytes('\r\n'))
        if line:
            return line.split(delimiter)
        else:
            return []
    try:
        # Make sure we're dealing with a proper dtype
        dtype = np.dtype(dtype)
        defconv = _getconv(dtype)
        # Skip the first `skiprows` lines
        for i in range(skiprows):
            next(fh)
        # Read until we find a line with some values, and use
        # it to estimate the number of columns, N.
        first_vals = None
        try:
            while not first_vals:
                first_line = next(fh)
                first_vals = split_line(first_line)
        except StopIteration:
            # End of lines reached
            first_line = ''
            first_vals = []
            warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
        N = len(usecols or first_vals)
        dtype_types, packing = flatten_dtype(dtype)
        if len(dtype_types) > 1:
            # We're dealing with a structured array, each field of
            # the dtype matches a column
            converters = [_getconv(dt) for dt in dtype_types]
        else:
            # All fields have the same dtype
            converters = [defconv for i in range(N)]
            if N > 1:
                packing = [(N, tuple)]
        # By preference, use the converters specified by the user
        for i, conv in (user_converters or {}).items():
            if usecols:
                try:
                    # Remap the caller's column index to its position in
                    # the trimmed `usecols` row.
                    i = usecols.index(i)
                except ValueError:
                    # Unused converter specified
                    continue
            converters[i] = conv
        # Parse each line, including the first
        for i, line in enumerate(itertools.chain([first_line], fh)):
            vals = split_line(line)
            if len(vals) == 0:
                continue
            if usecols:
                vals = [vals[i] for i in usecols]
            if len(vals) != N:
                line_num = i + skiprows + 1
                raise ValueError("Wrong number of columns at line %d"
                                 % line_num)
            # Convert each value according to its column and store
            items = [conv(val) for (conv, val) in zip(converters, vals)]
            # Then pack it according to the dtype's nesting
            items = pack_items(items, packing)
            X.append(items)
    finally:
        if fown:
            fh.close()
    X = np.array(X, dtype)
    # Multicolumn data are returned with shape (1, N, M), i.e.
    # (1, 1, M) for a single row - remove the singleton dimension there
    if X.ndim == 3 and X.shape[:2] == (1, 1):
        X.shape = (1, -1)
    # Verify that the array has at least dimensions `ndmin`.
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if X.ndim > ndmin:
        X = np.squeeze(X)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
    if X.ndim < ndmin:
        if ndmin == 1:
            X = np.atleast_1d(X)
        elif ndmin == 2:
            X = np.atleast_2d(X).T
    if unpack:
        if len(dtype_types) > 1:
            # For structured arrays, return an array for each field.
            return [X[field] for field in dtype.names]
        else:
            return X.T
    else:
        return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# '):
    """
    Save an array to a text file.
    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format. `loadtxt` understands gzipped files
        transparently.
    X : array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:
        a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
            like `' (%s+%sj)' % (fmt, fmt)`
        b) a full string specifying every real and imaginary part, e.g.
            `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
        c) a list of specifiers, one per column - in this case, the real
            and imaginary part must have separate specifiers,
            e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.
        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.
        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.
        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.
        .. versionadded:: 1.7.0
    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive
    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):
    flags:
        ``-`` : left justify
        ``+`` : Forces to precede result with + or -.
        ``0`` : Left pad the number with zeros instead of space (see width).
    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.
    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.
    specifiers:
        ``c`` : character
        ``d`` or ``i`` : signed decimal integer
        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
        ``f`` : decimal floating point
        ``g,G`` : use the shorter of ``e,E`` or ``f``
        ``o`` : signed octal
        ``s`` : string of characters
        ``u`` : unsigned decimal integer
        ``x,X`` : unsigned hexadecimal integer
    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.
    References
    ----------
    .. [1] `Format Specification Mini-Language
           <http://docs.python.org/library/string.html#
           format-specification-mini-language>`_, Python Documentation.
    Examples
    --------
    >>> x = y = z = np.arange(0.0,5.0,1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
    """
    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)
    # own_fh: whether this function opened `fh` and must close it.
    own_fh = False
    if is_pathlib_path(fname):
        fname = str(fname)
    if _is_string_like(fname):
        own_fh = True
        if fname.endswith('.gz'):
            import gzip
            fh = gzip.open(fname, 'wb')
        else:
            # Binary mode on Py3 because rows are encoded via asbytes below.
            if sys.version_info[0] >= 3:
                fh = open(fname, 'wb')
            else:
                fh = open(fname, 'w')
    elif hasattr(fname, 'write'):
        fh = fname
    else:
        raise ValueError('fname must be a string or file handle')
    try:
        X = np.asarray(X)
        # Handle 1-dimensional arrays
        if X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1
            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.descr)
        else:
            ncol = X.shape[1]
        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
        if type(fmt) in (list, tuple):
            # One specifier per column; joined into a single row format.
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape. %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
            if n_fmt_chars == 1:
                # A single specifier is replicated across all columns
                # (pairing real/imag parts for complex data).
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                # A full multi-column format string; used as-is.
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))
        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + header + newline))
        if iscomplex_X:
            for row in X:
                # Interleave real and imaginary parts to match the format.
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                fh.write(asbytes(format % tuple(row2) + newline))
        else:
            for row in X:
                try:
                    fh.write(asbytes(format % tuple(row) + newline))
                except TypeError:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format))
        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(asbytes(comments + footer + newline))
    finally:
        if own_fh:
            fh.close()
def fromregex(file, regexp, dtype):
    """
    Construct an array from a text file, using regular expression parsing.
    Every match of `regexp` in the file becomes one record of the returned
    structured array; each group of the regular expression fills one field.
    Parameters
    ----------
    file : str or file
        File name or file object to read.
    regexp : str or regexp
        Regular expression used to parse the file.
        Groups in the regular expression correspond to fields in the dtype.
    dtype : dtype or list of dtypes
        Dtype for the structured array.
    Returns
    -------
    output : ndarray
        The output array, containing the part of the content of `file` that
        was matched by `regexp`. `output` is always a structured array.
    Raises
    ------
    TypeError
        When `dtype` is not a valid dtype for a structured array.
    See Also
    --------
    fromstring, loadtxt
    Notes
    -----
    Dtypes for structured arrays can be specified in several forms, but all
    forms specify at least the data type and field name. For details see
    `doc.structured_arrays`.
    Examples
    --------
    >>> f = open('test.dat', 'w')
    >>> f.write("1312 foo\\n1534  bar\\n444   qux")
    >>> f.close()
    >>> regexp = r"(\\d+)\\s+(...)"  # match [digits, whitespace, anything]
    >>> output = np.fromregex('test.dat', regexp,
    ...                       [('num', np.int64), ('key', 'S3')])
    >>> output['num']
    array([1312, 1534,  444], dtype=int64)
    """
    # Open the file ourselves only when given a name rather than a handle.
    own_fh = not hasattr(file, "read")
    if own_fh:
        file = open(file, 'rb')
    try:
        if not hasattr(regexp, 'match'):
            regexp = re.compile(asbytes(regexp))
        if not isinstance(dtype, np.dtype):
            dtype = np.dtype(dtype)
        seq = regexp.findall(file.read())
        if seq and not isinstance(seq[0], tuple):
            # A single-group pattern yields plain strings rather than
            # tuples: build a one-field array, then re-view it with the
            # requested structured dtype.
            newdtype = np.dtype(dtype[dtype.names[0]])
            output = np.array(seq, dtype=newdtype)
            output.dtype = dtype
            return output
        return np.array(seq, dtype=dtype)
    finally:
        if own_fh:
            file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
               skip_header=0, skip_footer=0, converters=None,
               missing_values=None, filling_values=None, usecols=None,
               names=None, excludelist=None, deletechars=None,
               replace_space='_', autostrip=False, case_sensitive=True,
               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
               invalid_raise=True, max_rows=None):
    """
    Load data from a text file, with missing values handled as specified.
    Each line past the first `skip_header` lines is split at the `delimiter`
    character, and characters following the `comments` character are discarded.
    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
        that generators must return byte strings in Python 3k.  The strings
        in a list or produced by a generator are treated as lines.
    dtype : dtype, optional
        Data type of the resulting array.
        If None, the dtypes will be determined by the contents of each
        column, individually.
    comments : str, optional
        The character used to indicate the start of a comment.
        All the characters occurring on a line after a comment are discarded
    delimiter : str, int, or sequence, optional
        The string used to separate values.  By default, any consecutive
        whitespaces act as delimiter.  An integer or sequence of integers
        can also be provided as width(s) of each field.
    skiprows : int, optional
        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
    skip_header : int, optional
        The number of lines to skip at the beginning of the file.
    skip_footer : int, optional
        The number of lines to skip at the end of the file.
    converters : variable, optional
        The set of functions that convert the data of a column to a value.
        The converters can also be used to provide a default value
        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
    missing : variable, optional
        `missing` was removed in numpy 1.10. Please use `missing_values`
        instead.
    missing_values : variable, optional
        The set of strings corresponding to missing data.
    filling_values : variable, optional
        The set of values to be used as default when the data are missing.
    usecols : sequence, optional
        Which columns to read, with 0 being the first.  For example,
        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
    names : {None, True, str, sequence}, optional
        If `names` is True, the field names are read from the first valid line
        after the first `skip_header` lines.
        If `names` is a sequence or a single-string of comma-separated names,
        the names will be used to define the field names in a structured dtype.
        If `names` is None, the names of the dtype fields will be used, if any.
    excludelist : sequence, optional
        A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names are appended an underscore:
        for example, `file` would become `file_`.
    deletechars : str, optional
        A string combining invalid characters that must be deleted from the
        names.
    defaultfmt : str, optional
        A format used to define default field names, such as "f%i" or "f_%02i".
    autostrip : bool, optional
        Whether to automatically strip white spaces from the variables.
    replace_space : char, optional
        Character(s) used in replacement of white spaces in the variables
        names. By default, use a '_'.
    case_sensitive : {True, False, 'upper', 'lower'}, optional
        If True, field names are case sensitive.
        If False or 'upper', field names are converted to upper case.
        If 'lower', field names are converted to lower case.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``
    usemask : bool, optional
        If True, return a masked array.
        If False, return a regular array.
    loose : bool, optional
        If True, do not raise errors for invalid values.
    invalid_raise : bool, optional
        If True, an exception is raised if an inconsistency is detected in the
        number of columns.
        If False, a warning is emitted and the offending lines are skipped.
    max_rows : int,  optional
        The maximum number of rows to read. Must not be used with skip_footer
        at the same time.  If given, the value must be at least 1. Default is
        to read the entire file.
        .. versionadded:: 1.10.0
    Returns
    -------
    out : ndarray
        Data read from the text file. If `usemask` is True, this is a
        masked array.
    See Also
    --------
    numpy.loadtxt : equivalent function when no data is missing.
    Notes
    -----
    * When spaces are used as delimiters, or when no delimiter has been given
      as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`,
      there must not be any header in the file (else a ValueError
      exception is raised).
    * Individual values are not stripped of spaces by default.
      When using a custom converter, make sure the function does remove spaces.
    References
    ----------
    .. [1] NumPy User Guide, section `I/O with NumPy
           <http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
    Examples
    ---------
    >>> from io import StringIO
    >>> import numpy as np
    Comma delimited file with mixed dtype
    >>> s = StringIO("1,1.3,abcde")
    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
    ... ('mystring','S5')], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
    Using dtype = None
    >>> s.seek(0) # needed for StringIO example only
    >>> data = np.genfromtxt(s, dtype=None,
    ... names = ['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
    Specifying dtype and names
    >>> s.seek(0)
    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
    ... names=['myint','myfloat','mystring'], delimiter=",")
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
    An example with fixed-width columns
    >>> s = StringIO("11.3abcde")
    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
    ...     delimiter=[1,3,5])
    >>> data
    array((1, 1.3, 'abcde'),
          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
    """
    # `skip_footer` requires knowing where the file ends, while `max_rows`
    # stops reading early -- the two are mutually exclusive.
    if max_rows is not None:
        if skip_footer:
            raise ValueError(
                    "The keywords 'skip_footer' and 'max_rows' can not be "
                    "specified at the same time.")
        if max_rows < 1:
            raise ValueError("'max_rows' must be at least 1.")
    # Py3 data conversions to bytes, for convenience
    if comments is not None:
        comments = asbytes(comments)
    if isinstance(delimiter, unicode):
        delimiter = asbytes(delimiter)
    if isinstance(missing_values, (unicode, list, tuple)):
        missing_values = asbytes_nested(missing_values)
    #
    if usemask:
        from numpy.ma import MaskedArray, make_mask_descr
    # Check the input dictionary of converters
    user_converters = converters or {}
    if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converter' should be a valid dictionary "
            "(got '%s' instead)" % type(user_converters))
    # Initialize the filehandle, the LineSplitter and the NameValidator
    own_fhd = False
    try:
        if is_pathlib_path(fname):
            fname = str(fname)
        if isinstance(fname, basestring):
            if sys.version_info[0] == 2:
                fhd = iter(np.lib._datasource.open(fname, 'rbU'))
            else:
                fhd = iter(np.lib._datasource.open(fname, 'rb'))
            own_fhd = True
        else:
            fhd = iter(fname)
    except TypeError:
        raise TypeError(
            "fname must be a string, filehandle, list of strings, "
            "or generator. Got %s instead." % type(fname))
    split_line = LineSplitter(delimiter=delimiter, comments=comments,
                              autostrip=autostrip)._handyman
    validate_names = NameValidator(excludelist=excludelist,
                                   deletechars=deletechars,
                                   case_sensitive=case_sensitive,
                                   replace_space=replace_space)
    # Skip the first `skip_header` rows
    for i in range(skip_header):
        next(fhd)
    # Keep on until we find the first valid values
    first_values = None
    try:
        while not first_values:
            first_line = next(fhd)
            if names is True:
                # NOTE(review): substring membership test -- assumes
                # `comments` is not None whenever `names is True`; confirm.
                if comments in first_line:
                    first_line = (
                        asbytes('').join(first_line.split(comments)[1:]))
            first_values = split_line(first_line)
    except StopIteration:
        # return an empty array if the datafile is empty
        first_line = asbytes('')
        first_values = []
        warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
    # Should we take the first values as names ?
    if names is True:
        fval = first_values[0].strip()
        # Drop a leading comment marker from the header row, if present.
        if fval in comments:
            del first_values[0]
    # Check the columns to use: make sure `usecols` is a list
    if usecols is not None:
        try:
            usecols = [_.strip() for _ in usecols.split(",")]
        except AttributeError:
            try:
                usecols = list(usecols)
            except TypeError:
                usecols = [usecols, ]
    nbcols = len(usecols or first_values)
    # Check the names and overwrite the dtype.names if needed
    if names is True:
        names = validate_names([_bytes_to_name(_.strip())
                                for _ in first_values])
        first_line = asbytes('')
    elif _is_string_like(names):
        names = validate_names([_.strip() for _ in names.split(',')])
    elif names:
        names = validate_names(names)
    # Get the dtype
    if dtype is not None:
        dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
                           excludelist=excludelist,
                           deletechars=deletechars,
                           case_sensitive=case_sensitive,
                           replace_space=replace_space)
    # Make sure the names is a list (for 2.5)
    if names is not None:
        names = list(names)
    if usecols:
        for (i, current) in enumerate(usecols):
            # if usecols is a list of names, convert to a list of indices
            if _is_string_like(current):
                usecols[i] = names.index(current)
            elif current < 0:
                # Negative column indices count from the right.
                usecols[i] = current + len(first_values)
        # If the dtype is not None, make sure we update it
        if (dtype is not None) and (len(dtype) > nbcols):
            descr = dtype.descr
            dtype = np.dtype([descr[_] for _ in usecols])
            names = list(dtype.names)
        # If `names` is not None, update the names
        elif (names is not None) and (len(names) > nbcols):
            names = [names[_] for _ in usecols]
    elif (names is not None) and (dtype is not None):
        names = list(dtype.names)
    # Process the missing values ...............................
    # Rename missing_values for convenience
    user_missing_values = missing_values or ()
    # Define the list of missing_values (one column: one list)
    missing_values = [list([asbytes('')]) for _ in range(nbcols)]
    # We have a dictionary: process it field by field
    if isinstance(user_missing_values, dict):
        # Loop on the items
        for (key, val) in user_missing_values.items():
            # Is the key a string ?
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped
                    continue
            # Redefine the key as needed if it's a column number
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Transform the value as a list of string
            if isinstance(val, (list, tuple)):
                val = [str(_) for _ in val]
            else:
                val = [str(val), ]
            # Add the value(s) to the current list of missing
            if key is None:
                # None acts as default
                for miss in missing_values:
                    miss.extend(val)
            else:
                missing_values[key].extend(val)
    # We have a sequence : each item matches a column
    elif isinstance(user_missing_values, (list, tuple)):
        for (value, entry) in zip(user_missing_values, missing_values):
            value = str(value)
            if value not in entry:
                entry.append(value)
    # We have a string : apply it to all entries
    elif isinstance(user_missing_values, bytes):
        user_value = user_missing_values.split(asbytes(","))
        for entry in missing_values:
            entry.extend(user_value)
    # We have something else: apply it to all entries
    else:
        for entry in missing_values:
            entry.extend([str(user_missing_values)])
    # Process the filling_values ...............................
    # Rename the input for convenience
    user_filling_values = filling_values
    if user_filling_values is None:
        user_filling_values = []
    # Define the default
    filling_values = [None] * nbcols
    # We have a dictionary : update each entry individually
    if isinstance(user_filling_values, dict):
        for (key, val) in user_filling_values.items():
            if _is_string_like(key):
                try:
                    # Transform it into an integer
                    key = names.index(key)
                except ValueError:
                    # We couldn't find it: the name must have been dropped,
                    continue
            # Redefine the key if it's a column number and usecols is defined
            if usecols:
                try:
                    key = usecols.index(key)
                except ValueError:
                    pass
            # Add the value to the list
            filling_values[key] = val
    # We have a sequence : update on a one-to-one basis
    elif isinstance(user_filling_values, (list, tuple)):
        n = len(user_filling_values)
        if (n <= nbcols):
            filling_values[:n] = user_filling_values
        else:
            filling_values = user_filling_values[:nbcols]
    # We have something else : use it for all entries
    else:
        filling_values = [user_filling_values] * nbcols
    # Initialize the converters ................................
    if dtype is None:
        # Note: we can't use a [...]*nbcols, as we would have 3 times the same
        # ... converter, instead of 3 different converters.
        converters = [StringConverter(None, missing_values=miss, default=fill)
                      for (miss, fill) in zip(missing_values, filling_values)]
    else:
        dtype_flat = flatten_dtype(dtype, flatten_base=True)
        # Initialize the converters
        if len(dtype_flat) > 1:
            # Flexible type : get a converter from each dtype
            zipit = zip(dtype_flat, missing_values, filling_values)
            converters = [StringConverter(dt, locked=True,
                                          missing_values=miss, default=fill)
                          for (dt, miss, fill) in zipit]
        else:
            # Set to a default converter (but w/ different missing values)
            zipit = zip(missing_values, filling_values)
            converters = [StringConverter(dtype, locked=True,
                                          missing_values=miss, default=fill)
                          for (miss, fill) in zipit]
    # Update the converters to use the user-defined ones
    uc_update = []
    for (j, conv) in user_converters.items():
        # If the converter is specified by column names, use the index instead
        if _is_string_like(j):
            try:
                j = names.index(j)
                i = j
            except ValueError:
                continue
        elif usecols:
            try:
                i = usecols.index(j)
            except ValueError:
                # Unused converter specified
                continue
        else:
            i = j
        # Find the value to test - first_line is not filtered by usecols:
        if len(first_line):
            testing_value = first_values[j]
        else:
            testing_value = None
        converters[i].update(conv, locked=True,
                             testing_value=testing_value,
                             default=filling_values[i],
                             missing_values=missing_values[i],)
        uc_update.append((i, conv))
    # Make sure we have the corrected keys in user_converters...
    user_converters.update(uc_update)
    # Fixme: possible error as following variable never used.
    #miss_chars = [_.missing_values for _ in converters]
    # Initialize the output lists ...
    # ... rows
    rows = []
    append_to_rows = rows.append
    # ... masks
    if usemask:
        masks = []
        append_to_masks = masks.append
    # ... invalid
    invalid = []
    append_to_invalid = invalid.append
    # Parse each line
    for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
        values = split_line(line)
        nbvalues = len(values)
        # Skip an empty line
        if nbvalues == 0:
            continue
        if usecols:
            # Select only the columns we need
            try:
                values = [values[_] for _ in usecols]
            except IndexError:
                append_to_invalid((i + skip_header + 1, nbvalues))
                continue
        elif nbvalues != nbcols:
            append_to_invalid((i + skip_header + 1, nbvalues))
            continue
        # Store the values
        append_to_rows(tuple(values))
        if usemask:
            append_to_masks(tuple([v.strip() in m
                                   for (v, m) in zip(values,
                                                     missing_values)]))
        # Stop reading once `max_rows` complete rows have been stored.
        if len(rows) == max_rows:
            break
    if own_fhd:
        fhd.close()
    # Upgrade the converters (if needed)
    if dtype is None:
        for (i, converter) in enumerate(converters):
            current_column = [itemgetter(i)(_m) for _m in rows]
            try:
                converter.iterupgrade(current_column)
            except ConverterLockError:
                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
                current_column = map(itemgetter(i), rows)
                for (j, value) in enumerate(current_column):
                    try:
                        converter.upgrade(value)
                    except (ConverterError, ValueError):
                        errmsg += "(occurred line #%i for value '%s')"
                        errmsg %= (j + 1 + skip_header, value)
                        raise ConverterError(errmsg)
    # Check that we don't have invalid values
    nbinvalid = len(invalid)
    if nbinvalid > 0:
        nbrows = len(rows) + nbinvalid - skip_footer
        # Construct the error message
        template = " Line #%%i (got %%i columns instead of %i)" % nbcols
        if skip_footer > 0:
            nbinvalid_skipped = len([_ for _ in invalid
                                     if _[0] > nbrows + skip_header])
            invalid = invalid[:nbinvalid - nbinvalid_skipped]
            skip_footer -= nbinvalid_skipped
        #
        # nbrows -= skip_footer
        # errmsg = [template % (i, nb)
        #          for (i, nb) in invalid if i < nbrows]
        # else:
        errmsg = [template % (i, nb)
                  for (i, nb) in invalid]
        if len(errmsg):
            errmsg.insert(0, "Some errors were detected !")
            errmsg = "\n".join(errmsg)
            # Raise an exception ?
            if invalid_raise:
                raise ValueError(errmsg)
            # Issue a warning ?
            else:
                warnings.warn(errmsg, ConversionWarning, stacklevel=2)
    # Strip the last skip_footer data
    if skip_footer > 0:
        rows = rows[:-skip_footer]
        if usemask:
            masks = masks[:-skip_footer]
    # Convert each value according to the converter:
    # We want to modify the list in place to avoid creating a new one...
    if loose:
        rows = list(
            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    else:
        rows = list(
            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
                  for (i, conv) in enumerate(converters)]))
    # Reset the dtype
    data = rows
    if dtype is None:
        # Get the dtypes from the types of the converters
        column_types = [conv.type for conv in converters]
        # Find the columns with strings...
        strcolidx = [i for (i, v) in enumerate(column_types)
                     if v in (type('S'), np.string_)]
        # ... and take the largest number of chars.
        for i in strcolidx:
            column_types[i] = "|S%i" % max(len(row[i]) for row in data)
        #
        if names is None:
            # If the dtype is uniform, don't define names, else use ''
            base = set([c.type for c in converters if c._checked])
            if len(base) == 1:
                (ddtype, mdtype) = (list(base)[0], np.bool)
            else:
                ddtype = [(defaultfmt % i, dt)
                          for (i, dt) in enumerate(column_types)]
                if usemask:
                    mdtype = [(defaultfmt % i, np.bool)
                              for (i, dt) in enumerate(column_types)]
        else:
            ddtype = list(zip(names, column_types))
            mdtype = list(zip(names, [np.bool] * len(column_types)))
        output = np.array(data, dtype=ddtype)
        if usemask:
            outputmask = np.array(masks, dtype=mdtype)
    else:
        # Overwrite the initial dtype names if needed
        if names and dtype.names:
            dtype.names = names
        # Case 1. We have a structured type
        if len(dtype_flat) > 1:
            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
            # First, create the array using a flattened dtype:
            # [('a', int), ('b1', int), ('b2', float)]
            # Then, view the array using the specified dtype.
            if 'O' in (_.char for _ in dtype_flat):
                if has_nested_fields(dtype):
                    raise NotImplementedError(
                        "Nested fields involving objects are not supported...")
                else:
                    output = np.array(data, dtype=dtype)
            else:
                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
                output = rows.view(dtype)
            # Now, process the rowmasks the same way
            if usemask:
                rowmasks = np.array(
                    masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
                # Construct the new dtype
                mdtype = make_mask_descr(dtype)
                outputmask = rowmasks.view(mdtype)
        # Case #2. We have a basic dtype
        else:
            # We used some user-defined converters
            if user_converters:
                ishomogeneous = True
                descr = []
                for i, ttype in enumerate([conv.type for conv in converters]):
                    # Keep the dtype of the current converter
                    if i in user_converters:
                        ishomogeneous &= (ttype == dtype.type)
                        if ttype == np.string_:
                            ttype = "|S%i" % max(len(row[i]) for row in data)
                        descr.append(('', ttype))
                    else:
                        descr.append(('', dtype))
                # So we changed the dtype ?
                if not ishomogeneous:
                    # We have more than one field
                    if len(descr) > 1:
                        dtype = np.dtype(descr)
                    # We have only one field: drop the name if not needed.
                    else:
                        dtype = np.dtype(ttype)
            #
            output = np.array(data, dtype)
            if usemask:
                if dtype.names:
                    mdtype = [(_, np.bool) for _ in dtype.names]
                else:
                    mdtype = np.bool
                outputmask = np.array(masks, dtype=mdtype)
    # Try to take care of the missing data we missed
    names = output.dtype.names
    if usemask and names:
        for (name, conv) in zip(names or (), converters):
            missing_values = [conv(_) for _ in conv.missing_values
                              if _ != asbytes('')]
            for mval in missing_values:
                outputmask[name] |= (output[name] == mval)
    # Construct the final array
    if usemask:
        output = output.view(MaskedArray)
        output._mask = outputmask
    if unpack:
        return output.squeeze().T
    return output.squeeze()
def ndfromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a file and return it as a single array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function.
    """
    # Force a plain ndarray result by disabling the mask.
    options = dict(kwargs, usemask=False)
    return genfromtxt(fname, **options)
def mafromtxt(fname, **kwargs):
    """
    Load ASCII data stored in a text file and return a masked array.

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.
    """
    # Force a masked-array result, overriding any caller-supplied value.
    options = dict(kwargs, usemask=True)
    return genfromtxt(fname, **options)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
    """
    Load ASCII data stored in a comma-separated file.

    The returned array is a record array (if ``usemask=False``, see
    `recarray`) or a masked record array (if ``usemask=True``,
    see `ma.mrecords.MaskedRecords`).

    Parameters
    ----------
    fname, kwargs : For a description of input parameters, see `genfromtxt`.

    See Also
    --------
    numpy.genfromtxt : generic function to load ASCII data.

    Notes
    -----
    By default, `dtype` is None, which means that the data-type of the output
    array will be determined from the data.
    """
    # Defaults that make genfromtxt behave like a CSV reader: comma
    # delimiter, header row as (lower-cased) field names, inferred dtype.
    csv_defaults = (("case_sensitive", "lower"),
                    ("names", True),
                    ("delimiter", ","),
                    ("dtype", None))
    for option, value in csv_defaults:
        kwargs.setdefault(option, value)
    result = genfromtxt(fname, **kwargs)
    if kwargs.get("usemask", False):
        from numpy.ma.mrecords import MaskedRecords
        return result.view(MaskedRecords)
    return result.view(np.recarray)
| gpl-3.0 |
kawamon/hue | desktop/core/ext-py/celery-4.2.1/t/unit/backends/test_cassandra.py | 2 | 6244 | from __future__ import absolute_import, unicode_literals
from datetime import datetime
from pickle import dumps, loads
import pytest
from case import Mock, mock
from celery import states
from celery.exceptions import ImproperlyConfigured
from celery.utils.objects import Bunch
CASSANDRA_MODULES = ['cassandra', 'cassandra.auth', 'cassandra.cluster']
@mock.module(*CASSANDRA_MODULES)
class test_CassandraBackend:
    """Unit tests for ``celery.backends.cassandra.CassandraBackend``.

    The ``mock.module`` decorator stubs out the ``cassandra`` driver
    modules, so no real server connection is ever attempted.
    """
    def setup(self):
        # Minimal backend configuration; the listed server is never contacted.
        self.app.conf.update(
            cassandra_servers=['example.com'],
            cassandra_keyspace='celery',
            cassandra_table='task_results',
        )
    def test_init_no_cassandra(self, *modules):
        # should raise ImproperlyConfigured when no python-driver
        # installed.
        from celery.backends import cassandra as mod
        prev, mod.cassandra = mod.cassandra, None
        try:
            with pytest.raises(ImproperlyConfigured):
                mod.CassandraBackend(app=self.app)
        finally:
            mod.cassandra = prev
    # NOTE(review): method name misspells QUORUM; renaming would change the
    # collected test id, so it is documented rather than fixed here.
    def test_init_with_and_without_LOCAL_QUROM(self, *modules):
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        cons = mod.cassandra.ConsistencyLevel = Bunch(
            LOCAL_QUORUM='foo',
        )
        # Backend construction must work whether or not the requested
        # consistency level exists on the driver's ConsistencyLevel enum.
        self.app.conf.cassandra_read_consistency = 'LOCAL_FOO'
        self.app.conf.cassandra_write_consistency = 'LOCAL_FOO'
        mod.CassandraBackend(app=self.app)
        cons.LOCAL_FOO = 'bar'
        mod.CassandraBackend(app=self.app)
        # no servers raises ImproperlyConfigured
        with pytest.raises(ImproperlyConfigured):
            self.app.conf.cassandra_servers = None
            mod.CassandraBackend(
                app=self.app, keyspace='b', column_family='c',
            )
    @pytest.mark.usefixtures('depends_on_current_app')
    def test_reduce(self, *modules):
        # Backend must survive a pickle round-trip (used by async results).
        from celery.backends.cassandra import CassandraBackend
        assert loads(dumps(CassandraBackend(app=self.app)))
    def test_get_task_meta_for(self, *modules):
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        x = mod.CassandraBackend(app=self.app)
        x._connection = True
        session = x._session = Mock()
        execute = session.execute = Mock()
        # A matching row yields the stored state...
        execute.return_value = [
            [states.SUCCESS, '1', datetime.now(), b'', b'']
        ]
        x.decode = Mock()
        meta = x._get_task_meta_for('task_id')
        assert meta['status'] == states.SUCCESS
        # ... and an empty result set maps to PENDING.
        x._session.execute.return_value = []
        meta = x._get_task_meta_for('task_id')
        assert meta['status'] == states.PENDING
    def test_store_result(self, *modules):
        # Storing a result must only issue a session.execute call.
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        x = mod.CassandraBackend(app=self.app)
        x._connection = True
        session = x._session = Mock()
        session.execute = Mock()
        x._store_result('task_id', 'result', states.SUCCESS)
    def test_process_cleanup(self, *modules):
        # Cleanup must drop both the connection and the session.
        from celery.backends import cassandra as mod
        x = mod.CassandraBackend(app=self.app)
        x.process_cleanup()
        assert x._connection is None
        assert x._session is None
    def test_timeouting_cluster(self):
        # Tests behavior when Cluster.connect raises
        # cassandra.OperationTimedOut.
        from celery.backends import cassandra as mod
        class OTOExc(Exception):
            pass
        class VeryFaultyCluster(object):
            def __init__(self, *args, **kwargs):
                pass
            def connect(self, *args, **kwargs):
                raise OTOExc()
            def shutdown(self):
                pass
        mod.cassandra = Mock()
        mod.cassandra.OperationTimedOut = OTOExc
        mod.cassandra.cluster = Mock()
        mod.cassandra.cluster.Cluster = VeryFaultyCluster
        x = mod.CassandraBackend(app=self.app)
        with pytest.raises(OTOExc):
            x._store_result('task_id', 'result', states.SUCCESS)
        # A failed connect must leave the backend in a clean state.
        assert x._connection is None
        assert x._session is None
        x.process_cleanup()  # shouldn't raise
    def test_please_free_memory(self):
        # Ensure that Cluster object IS shut down.
        from celery.backends import cassandra as mod
        class RAMHoggingCluster(object):
            # Counts live cluster objects; shutdown() must balance connect().
            objects_alive = 0
            def __init__(self, *args, **kwargs):
                pass
            def connect(self, *args, **kwargs):
                RAMHoggingCluster.objects_alive += 1
                return Mock()
            def shutdown(self):
                RAMHoggingCluster.objects_alive -= 1
        mod.cassandra = Mock()
        mod.cassandra.cluster = Mock()
        mod.cassandra.cluster.Cluster = RAMHoggingCluster
        for x in range(0, 10):
            x = mod.CassandraBackend(app=self.app)
            x._store_result('task_id', 'result', states.SUCCESS)
            x.process_cleanup()
        assert RAMHoggingCluster.objects_alive == 0
    def test_auth_provider(self):
        # Ensure valid auth_provider works properly, and invalid one raises
        # ImproperlyConfigured exception.
        from celery.backends import cassandra as mod
        class DummyAuth(object):
            ValidAuthProvider = Mock()
        mod.cassandra = Mock()
        mod.cassandra.auth = DummyAuth
        # Valid auth_provider
        self.app.conf.cassandra_auth_provider = 'ValidAuthProvider'
        self.app.conf.cassandra_auth_kwargs = {
            'username': 'stuff'
        }
        mod.CassandraBackend(app=self.app)
        # Invalid auth_provider
        self.app.conf.cassandra_auth_provider = 'SpiderManAuth'
        self.app.conf.cassandra_auth_kwargs = {
            'username': 'Jack'
        }
        with pytest.raises(ImproperlyConfigured):
            mod.CassandraBackend(app=self.app)
    def test_options(self):
        # Ensure valid options works properly
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
        # Valid options
        self.app.conf.cassandra_options = {
            'cql_version': '3.2.1',
            'protocol_version': 3
        }
        mod.CassandraBackend(app=self.app)
| apache-2.0 |
jonathonwalz/ansible | lib/ansible/modules/system/puppet.py | 36 | 9409 | #!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
# Module metadata consumed by the Ansible module loader and ansible-doc.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['stableinterface'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: puppet
short_description: Runs puppet
description:
- Runs I(puppet) agent or apply in a reliable manner
version_added: "2.0"
options:
timeout:
description:
- How long to wait for I(puppet) to finish.
required: false
default: 30m
puppetmaster:
description:
- The hostname of the puppetmaster to contact.
required: false
default: None
modulepath:
description:
- Path to an alternate location for puppet modules
required: false
default: None
version_added: "2.4"
manifest:
description:
- Path to the manifest file to run puppet apply on.
required: false
default: None
facts:
description:
- A dict of values to pass in as persistent external facter facts
required: false
default: None
facter_basename:
description:
- Basename of the facter output file
required: false
default: ansible
environment:
description:
- Puppet environment to be used.
required: false
default: None
logdest:
description:
- Where the puppet logs should go, if puppet apply is being used
required: false
default: stdout
choices: [ 'stdout', 'syslog' ]
version_added: "2.1"
certname:
description:
- The name to use when handling certificates.
required: false
default: None
version_added: "2.1"
tags:
description:
- A comma-separated list of puppet tags to be used.
required: false
default: None
version_added: "2.1"
execute:
description:
- Execute a specific piece of Puppet code. It has no effect with
a puppetmaster.
required: false
default: None
version_added: "2.1"
requirements: [ puppet ]
author: "Monty Taylor (@emonty)"
'''
EXAMPLES = '''
# Run puppet agent and fail if anything goes wrong
- puppet
# Run puppet and timeout in 5 minutes
- puppet:
timeout: 5m
# Run puppet using a different environment
- puppet:
environment: testing
# Run puppet using a specific certname
- puppet:
certname: agent01.example.com
# Run puppet using a specific piece of Puppet code. Has no effect with a
# puppetmaster.
- puppet:
execute: 'include ::mymodule'
# Run puppet using a specific tags
- puppet:
tags: update,nginx
'''
import os
import pipes
import stat
try:
import json
except ImportError:
try:
import simplejson as json
except ImportError:
# Let snippet from module_utils/basic.py return a proper error in this case
pass
def _get_facter_dir():
if os.getuid() == 0:
return '/etc/facter/facts.d'
else:
return os.path.expanduser('~/.facter/facts.d')
def _write_structured_data(basedir, basename, data):
if not os.path.exists(basedir):
os.makedirs(basedir)
file_path = os.path.join(basedir, "{0}.json".format(basename))
# This is more complex than you might normally expect because we want to
# open the file with only u+rw set. Also, we use the stat constants
# because ansible still supports python 2.4 and the octal syntax changed
out_file = os.fdopen(
os.open(
file_path, os.O_CREAT | os.O_WRONLY,
stat.S_IRUSR | stat.S_IWUSR), 'w')
out_file.write(json.dumps(data).encode('utf8'))
out_file.close()
def main():
    """Run ``puppet agent`` (or ``puppet apply`` when a manifest is given)
    and translate puppet's --detailed-exitcodes results into Ansible
    success/changed/failed states."""
    module = AnsibleModule(
        argument_spec=dict(
            timeout=dict(default="30m"),
            puppetmaster=dict(required=False, default=None),
            modulepath=dict(required=False, default=None),
            manifest=dict(required=False, default=None),
            logdest=dict(
                required=False, default='stdout',
                choices=['stdout', 'syslog']),
            show_diff=dict(
                # internal code to work with --diff, do not use
                default=False, aliases=['show-diff'], type='bool'),
            facts=dict(default=None, type='dict'),
            facter_basename=dict(default='ansible'),
            environment=dict(required=False, default=None),
            certname=dict(required=False, default=None),
            tags=dict(required=False, default=None, type='list'),
            execute=dict(required=False, default=None),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ('puppetmaster', 'manifest'),
            ('puppetmaster', 'manifest', 'execute'),
            ('puppetmaster', 'modulepath')
        ],
    )
    p = module.params

    global PUPPET_CMD
    PUPPET_CMD = module.get_bin_path("puppet", False, ['/opt/puppetlabs/bin'])

    if not PUPPET_CMD:
        module.fail_json(
            msg="Could not find puppet. Please ensure it is installed.")

    global TIMEOUT_CMD
    TIMEOUT_CMD = module.get_bin_path("timeout", False)

    if p['manifest']:
        if not os.path.exists(p['manifest']):
            module.fail_json(
                msg="Manifest file %(manifest)s not found." % dict(
                    manifest=p['manifest']))

    # Check if puppet is disabled here
    if not p['manifest']:
        rc, stdout, stderr = module.run_command(
            PUPPET_CMD + " config print agent_disabled_lockfile")
        if os.path.exists(stdout.strip()):
            module.fail_json(
                msg="Puppet agent is administratively disabled.",
                disabled=True)
        elif rc != 0:
            module.fail_json(
                msg="Puppet agent state could not be determined.")

    # Drop any user-supplied facts into facter's external-facts dir so the
    # puppet run can see them (skipped in check mode: no side effects).
    if module.params['facts'] and not module.check_mode:
        _write_structured_data(
            _get_facter_dir(),
            module.params['facter_basename'],
            module.params['facts'])

    # Wrap puppet in coreutils' timeout(1) when available; -s 9 kills hard.
    if TIMEOUT_CMD:
        base_cmd = "%(timeout_cmd)s -s 9 %(timeout)s %(puppet_cmd)s" % dict(
            timeout_cmd=TIMEOUT_CMD,
            timeout=pipes.quote(p['timeout']),
            puppet_cmd=PUPPET_CMD)
    else:
        base_cmd = PUPPET_CMD

    if not p['manifest']:
        # Agent mode: one-shot run against the (optional) puppetmaster.
        cmd = ("%(base_cmd)s agent --onetime"
               " --ignorecache --no-daemonize --no-usecacheonfailure --no-splay"
               " --detailed-exitcodes --verbose --color 0") % dict(
                   base_cmd=base_cmd,
                   )
        if p['puppetmaster']:
            cmd += " --server %s" % pipes.quote(p['puppetmaster'])
        if p['show_diff']:
            cmd += " --show_diff"
        if p['environment']:
            cmd += " --environment '%s'" % p['environment']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if module.check_mode:
            cmd += " --noop"
        else:
            cmd += " --no-noop"
    else:
        # Apply mode: run the given manifest locally.
        cmd = "%s apply --detailed-exitcodes " % base_cmd
        if p['logdest'] == 'syslog':
            cmd += "--logdest syslog "
        if p['modulepath']:
            # BUG FIX: a trailing space was missing here, which glued
            # --modulepath='...' to whatever option came next.
            cmd += "--modulepath='%s' " % p['modulepath']
        if p['environment']:
            cmd += "--environment '%s' " % p['environment']
        if p['certname']:
            cmd += " --certname='%s'" % p['certname']
        if p['execute']:
            cmd += " --execute '%s'" % p['execute']
        if p['tags']:
            cmd += " --tags '%s'" % ','.join(p['tags'])
        if module.check_mode:
            cmd += "--noop "
        else:
            cmd += "--no-noop "
        cmd += pipes.quote(p['manifest'])
    rc, stdout, stderr = module.run_command(cmd)

    # puppet --detailed-exitcodes: 0 = no changes, 2 = changes applied,
    # 1 = failure (or disabled); 124 is timeout(1)'s "timed out" code.
    if rc == 0:
        # success
        module.exit_json(rc=rc, changed=False, stdout=stdout, stderr=stderr)
    elif rc == 1:
        # rc==1 could be because it's disabled
        # rc==1 could also mean there was a compilation failure
        disabled = "administratively disabled" in stdout
        if disabled:
            msg = "puppet is disabled"
        else:
            msg = "puppet did not run"
        module.exit_json(
            rc=rc, disabled=disabled, msg=msg,
            error=True, stdout=stdout, stderr=stderr)
    elif rc == 2:
        # success with changes
        module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
    elif rc == 124:
        # timeout
        module.exit_json(
            rc=rc, msg="%s timed out" % cmd, stdout=stdout, stderr=stderr)
    else:
        # failure
        module.fail_json(
            rc=rc, msg="%s failed with return code: %d" % (cmd, rc),
            stdout=stdout, stderr=stderr)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
daniabo/kernel-HuaweiP2-6011.3.0.8 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
# Usage string shown when too many command-line arguments are given.
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
# Optional filter: restrict the summary to one comm name or one pid.
for_comm = None
for_pid = None
if len(sys.argv) > 2:
	sys.exit(usage)
if len(sys.argv) > 1:
	# A numeric argument is treated as a pid, anything else as a comm.
	try:
		for_pid = int(sys.argv[1])
	except:
		for_comm = sys.argv[1]
# Nested auto-vivifying dict: comm -> pid -> syscall id -> count.
syscalls = autodict()
def trace_begin():
	# Called by perf once before any events are processed.
	print "Press control+C to stop and show the summary"
def trace_end():
	# Called by perf after the last event; dump the accumulated counts.
	print_syscall_totals()
# Per-event callback; the name and parameter list are fixed by perf's
# tracepoint binding (raw_syscalls:sys_enter), including the 'id' parameter
# that shadows the builtin.
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# Skip events not matching the optional comm/pid filter.
	if (for_comm and common_comm != for_comm) or \
	   (for_pid and common_pid != for_pid ):
		return
	# autodict vivifies intermediate levels; the leaf starts out as a
	# fresh autodict, so the first += raises TypeError and we seed with 1.
	try:
		syscalls[common_comm][common_pid][id] += 1
	except TypeError:
		syscalls[common_comm][common_pid][id] = 1
# Python-2-only code (print statements, dict.iteritems, tuple-unpacking
# lambda); runs inside perf's embedded python2 interpreter.
def print_syscall_totals():
	if for_comm is not None:
		print "\nsyscall events for %s:\n\n" % (for_comm),
	else:
		print "\nsyscall events by comm/pid:\n\n",
	print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
	print "%-40s %10s\n" % ("----------------------------------------", \
	    "----------"),
	comm_keys = syscalls.keys()
	for comm in comm_keys:
		pid_keys = syscalls[comm].keys()
		for pid in pid_keys:
			print "\n%s [%d]\n" % (comm, pid),
			id_keys = syscalls[comm][pid].keys()
			# Sort each pid's syscalls by descending count.
			for id, val in sorted(syscalls[comm][pid].iteritems(), \
				key = lambda(k, v): (v, k), reverse = True):
				print " %-38s %10d\n" % (syscall_name(id), val),
jbarentsen/drb | drbadmin/node_modules/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/ninja_test.py | 1843 | 1786 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the ninja.py file. """
import gyp.generator.ninja as ninja
import unittest
import StringIO
import sys
import TestCommon
class TestPrefixesAndSuffixes(unittest.TestCase):
  """Checks that NinjaWriter computes platform-appropriate binary names."""

  def test_BinaryNamesWindows(self):
    # These cannot run on non-Windows as they require a VS installation to
    # correctly handle variable expansion.
    if not sys.platform.startswith('win'):
      return
    w = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
                          'build.ninja', 'win')
    target = { 'target_name': 'wee' }
    self.assertTrue(
        w.ComputeOutputFileName(target, 'executable').endswith('.exe'))
    self.assertTrue(
        w.ComputeOutputFileName(target, 'shared_library').endswith('.dll'))
    self.assertTrue(
        w.ComputeOutputFileName(target, 'static_library').endswith('.lib'))

  def test_BinaryNamesLinux(self):
    w = ninja.NinjaWriter('foo', 'wee', '.', '.', 'build.ninja', '.',
                          'build.ninja', 'linux')
    target = { 'target_name': 'wee' }
    exe_name = w.ComputeOutputFileName(target, 'executable')
    shlib_name = w.ComputeOutputFileName(target, 'shared_library')
    stlib_name = w.ComputeOutputFileName(target, 'static_library')
    # Executables carry no extension; libraries follow the lib*/.so/.a scheme.
    self.assertTrue('.' not in exe_name)
    self.assertTrue(shlib_name.startswith('lib'))
    self.assertTrue(stlib_name.startswith('lib'))
    self.assertTrue(shlib_name.endswith('.so'))
    self.assertTrue(stlib_name.endswith('.a'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
curiosityio/taiga-docker | taiga-back/taiga-back/tests/unit/test_gravatar.py | 3 | 1253 | # Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import hashlib
from taiga.users.gravatar import get_gravatar_url
def test_get_gravatar_url():
    """The gravatar URL must embed the md5 of the email plus the extra params."""
    email = "user@email.com"
    expected_hash = hashlib.md5(email.encode()).hexdigest()

    gravatar_url = get_gravatar_url(email, s=40, d="default-image-url")

    assert expected_hash in gravatar_url
    assert 's=40' in gravatar_url
    assert 'd=default-image-url' in gravatar_url
| mit |
Carmezim/tensorflow | tensorflow/contrib/learn/python/learn/datasets/__init__.py | 121 | 3534 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Dataset utilities and synthetic/reference datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
from os import path
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.datasets import mnist
from tensorflow.contrib.learn.python.learn.datasets import synthetic
from tensorflow.contrib.learn.python.learn.datasets import text_datasets
# Export load_iris and load_boston.
load_iris = base.load_iris
load_boston = base.load_boston
# List of all available datasets.
# Note, currently they may return different types.
DATASETS = {
    # Returns base.Dataset.
    'iris': base.load_iris,
    'boston': base.load_boston,
    # Returns base.Datasets (train/validation/test sets).
    'mnist': mnist.load_mnist,
    # 'dbpedia' additionally takes (size, test_with_fake_data);
    # see load_dataset() below for the special-cased call.
    'dbpedia': text_datasets.load_dbpedia,
}
# List of all synthetic datasets
SYNTHETIC = {
    # All of these will return ['data', 'target'] -> base.Dataset
    'circles': synthetic.circles,
    'spirals': synthetic.spirals
}
def load_dataset(name, size='small', test_with_fake_data=False):
  """Loads dataset by name.

  Args:
    name: Name of the dataset to load.
    size: Size of the dataset to load.
    test_with_fake_data: If true, load with fake dataset.

  Returns:
    Features and labels for given dataset. Can be numpy or iterator.

  Raises:
    ValueError: if `name` is not found.
  """
  if name not in DATASETS:
    raise ValueError('Name of dataset is not found: %s' % name)
  loader = DATASETS[name]
  # dbpedia is the only loader taking (size, test_with_fake_data) arguments.
  if name == 'dbpedia':
    return loader(size, test_with_fake_data)
  return loader()
def make_dataset(name, n_samples=100, noise=None, seed=42, *args, **kwargs):
  """Creates binary synthetic datasets.

  Args:
    name: str, name of the dataset to generate.
    n_samples: int, number of datapoints to generate.
    noise: float or None, standard deviation of the Gaussian noise added.
    seed: int or None, seed for the noise generator.

  Returns:
    Shuffled features and labels for given synthetic dataset of type
    `base.Dataset`.

  Raises:
    ValueError: Raised if `name` not found.

  Note:
    - This is a generic synthetic data generator - individual generators might
      have more parameters! See documentation for individual parameters.
    - Note that the `noise` parameter uses `numpy.random.normal` and depends
      on `numpy`'s seed.

  TODO:
    - Support multiclass datasets.
    - Need shuffling routine. Currently synthetic datasets are reshuffled to
      avoid train/test correlation, but that hurts reproducibility.
  """
  if name not in SYNTHETIC:
    # Typo fix: the message previously read "implemeted".
    raise ValueError('Synthetic dataset not found or not implemented: %s' % name)
  return SYNTHETIC[name](n_samples=n_samples, noise=noise, seed=seed,
                         *args, **kwargs)
| apache-2.0 |
grit-engine/grit-engine | dependencies/quex-0.34.1/quex/core_engine/generator/action_info.py | 2 | 5824 | from quex.frs_py.file_in import is_identifier_start, \
is_identifier_continue
import quex.core_engine.generator.skip_code as skip_code
class ActionI:
    """Base interface for a piece of action code.

    Holds the code text together with a flag telling whether the code
    requires the matched lexeme to be terminated with a zero character.
    """
    def __init__(self, Code, RequireTerminatingZeroF=False):
        self.__code = Code
        self.__require_terminating_zero_f = RequireTerminatingZeroF

    def get_code(self):
        """Return the stored code text."""
        return self.__code

    def set_code(self, Code):
        """Replace the stored code text."""
        self.__code = Code

    def require_terminating_zero_f(self):
        """True if the code needs a terminating zero on the lexeme."""
        return self.__require_terminating_zero_f

    def set_require_terminating_zero_f(self):
        """Mark the code as requiring a terminating zero."""
        self.__require_terminating_zero_f = True
# A plain code fragment without file/line origin information
# (contrast with UserCodeFragment further below).
class CodeFragment(ActionI):
    def __init__(self, Code="", RequireTerminatingZeroF=False):
        ActionI.__init__(self, Code, RequireTerminatingZeroF)
# Maps language name -> [placeholder, resetting-pragma template]; only "C"
# (covering C/C++) is supported so far.
UserCodeFragment_OpenLinePragma = {
#___________________________________________________________________________________
# Line pragmas allow to direct the 'virtual position' of a program in a file
# that was generated to its origin. That is, if an error occurs in line in a
# C-program which is actually is the pasted code from a certain line in a .qx
# file, then the compiler would print out the line number from the .qx file.
#
# This mechanism relies on line pragmas of the form '#line xx "filename"' telling
# the compiler to count the lines from xx and consider them as being from filename.
#
# During code generation, the pasted code segments need to be followed by line
# pragmas resetting the original C-file as the origin, so that errors that occur
# in the 'real' code are not considered as coming from the .qx files.
# Therefore, the code generator introduces placeholders that are to be replaced
# once the whole code is generated.
#
#  ...[Language][0] = the placeholder until the whole code is generated
#  ...[Language][1] = template containing 'NUMBER' and 'FILENAME' that
#                     are to replaced in order to get the resetting line pragma.
#___________________________________________________________________________________
    "C": [
        '/* POST-ADAPTION: FILL IN APPROPRIATE LINE PRAGMA */',
        '#line NUMBER "FILENAME"'
    ],
}
class UserCodeFragment(ActionI):
    """Code fragment written by the user, remembering the file name and
    line number of its origin so that '#line' pragmas can point compiler
    errors back to the original source.

    NOTE: the type asserts use the Python-2-only names 'unicode' and
    'long'; this module targets Python 2.
    """
    def __init__(self, Code, Filename, LineN, LanguageDB=None, AddReferenceCodeF=False):
        assert type(Code) in [str, unicode]
        assert type(LanguageDB) == dict or LanguageDB == None
        assert type(Filename) in [str, unicode]
        assert type(LineN) in [int, long, float]

        # Origin of the fragment in the user's input file.
        self.filename = Filename
        self.line_n = LineN

        # No clue yet, how to deal with languages other than C/C++ here.
        if AddReferenceCodeF and Code != "":
            # Wrap the code in a '#line' pragma pointing at the .qx origin,
            # followed by the placeholder that is later replaced by a pragma
            # resetting the generated file as origin.
            txt = '\n#line %i "%s"\n' % (self.line_n, self.filename)
            txt += Code
            if txt[-1] != "\n": txt = txt + "\n"
            txt += UserCodeFragment_OpenLinePragma["C"][0] + "\n"
            code = txt
        else:
            code = Code

        # Let the language database decide whether this code needs the
        # lexeme to be zero-terminated.
        require_terminating_zero_f = False
        if LanguageDB != None and LanguageDB["$require-terminating-zero-preparation"](LanguageDB, code):
            require_terminating_zero_f = True

        ActionI.__init__(self, code, require_terminating_zero_f)
def UserCodeFragment_straighten_open_line_pragmas(filename, Language):
    """Post-process a generated file: replace every line-pragma placeholder
    (see UserCodeFragment_OpenLinePragma) with a real '#line' pragma that
    resets *filename* and the current line number as the origin.

    Raises:
        IOError: if *filename* cannot be opened.
    """
    if Language not in UserCodeFragment_OpenLinePragma.keys():
        return

    # BUG FIX: the original code did ``raise "error: ..."``; raising a plain
    # string is a TypeError at raise time.  Raise a real exception instead,
    # and catch only IOError rather than everything.
    try:
        fh = open(filename)
    except IOError:
        raise IOError("error: file to straighten line pragmas not found: '%s'"
                      % filename)

    new_content = ""
    line_n = 0
    LinePragmaInfo = UserCodeFragment_OpenLinePragma[Language]
    for line in fh.readlines():
        line_n += 1
        if line.find(LinePragmaInfo[0]) != -1:
            if Language == "C":
                # Fill the template with the placeholder's own position.
                line = LinePragmaInfo[1]
                line = line.replace("NUMBER", repr(int(line_n)))
                line = line.replace("FILENAME", filename)
                line = line + "\n"
        new_content += line
    fh.close()

    fh = open(filename, "w")
    fh.write(new_content)
    fh.close()
class PatternActionInfo:
    """Couples a pattern's state machine with the action to execute when
    the pattern matches.

    NOTE: uses the Python-2-only name 'unicode'; this module targets
    Python 2.  'PatternStateMachine' is checked by class name only, so the
    StateMachine type itself need not be imported here.
    """
    def __init__(self, PatternStateMachine, Action, Pattern="", IL = None, ModeName=""):

        assert Action == None or \
               issubclass(Action.__class__, ActionI) or \
               type(Action) in [str, unicode]
        assert PatternStateMachine.__class__.__name__ == "StateMachine" \
               or PatternStateMachine == None

        self.__pattern_state_machine = PatternStateMachine
        # A plain string action is wrapped into a CodeFragment.
        if type(Action) in [str, unicode]: self.__action = CodeFragment(Action)
        else: self.__action = Action

        self.pattern = Pattern

        # depth of inheritance where the pattern occurs
        self.inheritance_level = IL
        self.inheritance_mode_name = ModeName

    def pattern_state_machine(self):
        """Return the state machine implementing the pattern match."""
        return self.__pattern_state_machine

    def action(self):
        """Return the action object (an ActionI subclass instance)."""
        return self.__action

    def pattern_index(self):
        """Return the unique id of the pattern's state machine."""
        return self.pattern_state_machine().get_id()

    def __repr__(self):
        txt = ""
        txt += "self.pattern = " + repr(self.pattern) + "\n"
        txt += "self.pattern_state_machine = \n" + repr(self.pattern_state_machine()).replace("\n", "\n ")
        txt += "self.action = " + repr(self.action().get_code()) + "\n"
        if self.action().__class__ == UserCodeFragment:
            txt += "self.filename = " + repr(self.action().filename) + "\n"
            txt += "self.line_n = " + repr(self.action().line_n) + "\n"
        txt += "self.inheritance_level = " + repr(self.inheritance_level) + "\n"
        txt += "self.pattern_index = " + repr(self.pattern_state_machine().core().id()) + "\n"
        return txt
| mit |
jonashaag/ansible | lib/ansible/plugins/action/synchronize.py | 35 | 10195 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
from ansible.plugins.action import ActionBase
from ansible.plugins import connection_loader
from ansible.utils.boolean import boolean
from ansible import constants as C
class ActionModule(ActionBase):
    """Action plugin for the ``synchronize`` module.

    Rewrites the task arguments so that rsync runs on the right host
    (normally the controller) with correctly formatted ``[user@]host:path``
    specs, then delegates execution to the ``synchronize`` module proper.
    """

    def _get_absolute_path(self, path):
        """Expand a relative *path* against the role's files/ directory (or
        the playbook basedir), preserving a trailing '/' because it is
        significant to rsync."""
        # BUG FIX: 'original_path' was previously only assigned inside an
        # 'if self._task._role is not None:' branch, which raised
        # UnboundLocalError for non-role tasks with relative paths.
        original_path = path

        if self._task._role is not None:
            path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
        else:
            path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path)

        if original_path and original_path[-1] == '/' and path[-1] != '/':
            # make sure the dwim'd path ends in a trailing "/"
            # if the original path did
            path += '/'

        return path

    def _process_origin(self, host, path, user):
        """Format the rsync origin side as '[user@][host]:path'; expand a
        bare relative path when the origin is the local machine."""
        if host not in C.LOCALHOST:
            if user:
                return '%s@[%s]:%s' % (user, host, path)
            else:
                return '[%s]:%s' % (host, path)

        if ':' not in path and not path.startswith('/'):
            path = self._get_absolute_path(path=path)
        return path

    def _process_remote(self, host, path, user):
        """Like _process_origin(), but also treats a localhost target as
        remote when the play's transport is not 'local'."""
        transport = self._play_context.connection
        if host not in C.LOCALHOST or transport != "local":
            if user:
                return '%s@[%s]:%s' % (user, host, path)
            else:
                return '[%s]:%s' % (host, path)

        if ':' not in path and not path.startswith('/'):
            path = self._get_absolute_path(path=path)
        return path

    def _override_module_replaced_vars(self, task_vars):
        """ Some vars are substituted into the modules.  Have to make sure
        that those are correct for localhost when synchronize creates its own
        connection to localhost."""

        # Clear the current definition of these variables as they came from the
        # connection to the remote host
        if 'ansible_syslog_facility' in task_vars:
            del task_vars['ansible_syslog_facility']
        # BUG FIX: iterate over a snapshot of the keys -- deleting from a
        # dict while iterating its live key view fails on Python 3.
        for key in list(task_vars.keys()):
            if key.startswith("ansible_") and key.endswith("_interpreter"):
                del task_vars[key]

        # Add the definitions from localhost
        # NOTE(review): assumes some C.LOCALHOST entry exists in hostvars;
        # 'localhost' would be unbound otherwise -- TODO confirm.
        for host in C.LOCALHOST:
            if host in task_vars['hostvars']:
                localhost = task_vars['hostvars'][host]
                break
        if 'ansible_syslog_facility' in localhost:
            task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
        for key in localhost:
            if key.startswith("ansible_") and key.endswith("_interpreter"):
                task_vars[key] = localhost[key]

    def run(self, tmp=None, task_vars=None):
        ''' generates params and passes them on to the rsync module '''
        # BUG FIX: the default used to be the mutable ``dict()``; this method
        # mutates task_vars (see _override_module_replaced_vars), so a shared
        # default would leak state between calls.
        if task_vars is None:
            task_vars = dict()

        original_transport = task_vars.get('ansible_connection') or self._play_context.connection
        remote_transport = False
        if original_transport != 'local':
            remote_transport = True

        try:
            delegate_to = self._play_context.delegate_to
        except (AttributeError, KeyError):
            delegate_to = None

        use_ssh_args = self._task.args.pop('use_ssh_args', None)

        # Parameter name needed by the ansible module
        self._task.args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'

        # rsync thinks that one end of the connection is localhost and the
        # other is the host we're running the task for (Note: We use
        # ansible's delegate_to mechanism to determine which host rsync is
        # running on so localhost could be a non-controller machine if
        # delegate_to is used)
        src_host = '127.0.0.1'
        dest_host = task_vars.get('ansible_ssh_host') or task_vars.get('inventory_hostname')
        dest_is_local = dest_host in C.LOCALHOST

        # CHECK FOR NON-DEFAULT SSH PORT
        if self._task.args.get('dest_port', None) is None:
            inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
            if inv_port is not None:
                self._task.args['dest_port'] = inv_port

        # Set use_delegate if we are going to run rsync on a delegated host
        # instead of localhost
        use_delegate = False
        if dest_host == delegate_to:
            # edge case: explicit delegate and dest_host are the same
            # so we run rsync on the remote machine targetting its localhost
            # (itself)
            dest_host = '127.0.0.1'
            use_delegate = True
        elif delegate_to is not None and remote_transport:
            # If we're delegating to a remote host then we need to use the
            # delegate_to settings
            use_delegate = True

        # Delegate to localhost as the source of the rsync unless we've been
        # told (via delegate_to) that a different host is the source of the
        # rsync
        transport_overridden = False
        if not use_delegate and remote_transport:
            # Create a connection to localhost to run rsync on
            new_stdin = self._connection._new_stdin
            new_connection = connection_loader.get('local', self._play_context, new_stdin)
            self._connection = new_connection
            transport_overridden = True
            self._override_module_replaced_vars(task_vars)

        # COMPARE DELEGATE, HOST AND TRANSPORT
        between_multiple_hosts = False
        if dest_host != src_host and remote_transport:
            # We're not copying two filesystem trees on the same host so we
            # need to correctly format the paths for rsync (like
            # user@host:path/to/tree
            between_multiple_hosts = True

        # SWITCH SRC AND DEST HOST PER MODE
        if self._task.args.get('mode', 'push') == 'pull':
            (dest_host, src_host) = (src_host, dest_host)

        # MUNGE SRC AND DEST PER REMOTE_HOST INFO
        src = self._task.args.get('src', None)
        dest = self._task.args.get('dest', None)
        if between_multiple_hosts or use_delegate:
            # Private key handling
            if use_delegate:
                private_key = task_vars.get('ansible_ssh_private_key_file') or self._play_context.private_key_file
            else:
                private_key = task_vars.get('ansible_ssh_private_key_file') or self._play_context.private_key_file

            if private_key is not None:
                private_key = os.path.expanduser(private_key)
                self._task.args['private_key'] = private_key

            # Src and dest rsync "path" handling
            # Determine if we need a user@
            user = None
            if boolean(self._task.args.get('set_remote_user', 'yes')):
                if use_delegate:
                    if 'hostvars' in task_vars and delegate_to in task_vars['hostvars']:
                        user = task_vars['hostvars'][delegate_to].get('ansible_ssh_user', None)

                if not use_delegate or not user:
                    user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

            # use the mode to define src and dest's url
            if self._task.args.get('mode', 'push') == 'pull':
                # src is a remote path: <user>@<host>, dest is a local path
                src = self._process_remote(src_host, src, user)
                dest = self._process_origin(dest_host, dest, user)
            else:
                # src is a local path, dest is a remote path: <user>@<host>
                src = self._process_origin(src_host, src, user)
                dest = self._process_remote(dest_host, dest, user)
        else:
            # Still need to munge paths (to account for roles) even if we aren't
            # copying files between hosts
            if not src.startswith('/'):
                src = self._get_absolute_path(path=src)
            if not dest.startswith('/'):
                dest = self._get_absolute_path(path=dest)

        self._task.args['src'] = src
        self._task.args['dest'] = dest

        # Allow custom rsync path argument
        rsync_path = self._task.args.get('rsync_path', None)

        # If no rsync_path is set, sudo was originally set, and dest is remote then add 'sudo rsync' argument
        if not rsync_path and transport_overridden and self._play_context.become and self._play_context.become_method == 'sudo' and not dest_is_local:
            rsync_path = 'sudo rsync'

        # make sure rsync path is quoted.
        if rsync_path:
            self._task.args['rsync_path'] = '"%s"' % rsync_path

        if use_ssh_args:
            self._task.args['ssh_args'] = C.ANSIBLE_SSH_ARGS

        # run the module and store the result
        result = self._execute_module('synchronize', task_vars=task_vars)

        # BUG FIX: 'msg' is not guaranteed to be present on success; use
        # .get() so a clean run does not raise KeyError here.
        if 'SyntaxError' in result.get('msg', ''):
            # Emit a warning about using python3 because synchronize is
            # somewhat unique in running on localhost
            result['traceback'] = result['msg']
            result['msg'] = 'SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this'

        return result
| gpl-3.0 |
bprodoehl/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/scons.py | 137 | 33601 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gyp
import gyp.common
import gyp.SCons as SCons
import os.path
import pprint
import re
# TODO: remove when we delete the last WriteList() call in this module
WriteList = SCons.WriteList
# Expansions GYP substitutes for its generator variables; the SCons
# generator maps them onto SCons construction-variable syntax.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '',
    'STATIC_LIB_PREFIX': '${LIBPREFIX}',
    'SHARED_LIB_PREFIX': '${SHLIBPREFIX}',
    'STATIC_LIB_SUFFIX': '${LIBSUFFIX}',
    'SHARED_LIB_SUFFIX': '${SHLIBSUFFIX}',
    'INTERMEDIATE_DIR': '${INTERMEDIATE_DIR}',
    'SHARED_INTERMEDIATE_DIR': '${SHARED_INTERMEDIATE_DIR}',
    'OS': 'linux',
    'PRODUCT_DIR': '$TOP_BUILDDIR',
    'SHARED_LIB_DIR': '$LIB_DIR',
    'LIB_DIR': '$LIB_DIR',
    'RULE_INPUT_ROOT': '${SOURCE.filebase}',
    'RULE_INPUT_EXT': '${SOURCE.suffix}',
    'RULE_INPUT_NAME': '${SOURCE.file}',
    'RULE_INPUT_PATH': '${SOURCE.abspath}',
    'CONFIGURATION_NAME': '${CONFIG_NAME}',
}
# Tell GYP how to process the input for us.
generator_handles_variants = True
generator_wants_absolute_build_file_paths = True
def FixPath(path, prefix):
    """Prepend *prefix* to *path* unless the path is absolute or starts
    with a '$' SCons variable reference."""
    if os.path.isabs(path) or path[0] == '$':
        return path
    return prefix + path
header = """\
# This file is generated; do not edit.
"""
_alias_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Alias(
['_%(target_name)s_action'],
%(inputs)s,
_action
)
env.AlwaysBuild(_outputs)
"""
_run_as_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
"""
_run_as_template_suffix = """
_run_as_target = env.Alias('run_%(target_name)s', target_files, _action)
env.Requires(_run_as_target, [
Alias('%(target_name)s'),
])
env.AlwaysBuild(_run_as_target)
"""
_command_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Command(
%(outputs)s,
%(inputs)s,
_action
)
"""
# This is copied from the default SCons action, updated to handle symlinks.
_copy_action_template = """
import shutil
import SCons.Action
def _copy_files_or_dirs_or_symlinks(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, dest)
return 0
elif os.path.isfile(src):
return shutil.copy2(src, dest)
else:
return shutil.copytree(src, dest, 1)
def _copy_files_or_dirs_or_symlinks_str(dest, src):
return 'Copying %s to %s ...' % (src, dest)
GYPCopy = SCons.Action.ActionFactory(_copy_files_or_dirs_or_symlinks,
_copy_files_or_dirs_or_symlinks_str,
convert=str)
"""
_rule_template = """
%(name)s_additional_inputs = %(inputs)s
%(name)s_outputs = %(outputs)s
def %(name)s_emitter(target, source, env):
return (%(name)s_outputs, source + %(name)s_additional_inputs)
if GetOption('verbose'):
%(name)s_action = Action([%(action)s])
else:
%(name)s_action = Action([%(action)s], %(message)s)
env['BUILDERS']['%(name)s'] = Builder(action=%(name)s_action,
emitter=%(name)s_emitter)
_outputs = []
_processed_input_files = []
for infile in input_files:
if (type(infile) == type('')
and not os.path.isabs(infile)
and not infile[0] == '$'):
infile = %(src_dir)r + infile
if str(infile).endswith('.%(extension)s'):
_generated = env.%(name)s(infile)
env.Precious(_generated)
_outputs.append(_generated)
%(process_outputs_as_sources_line)s
else:
_processed_input_files.append(infile)
prerequisites.extend(_outputs)
input_files = _processed_input_files
"""
_spawn_hack = """
import re
import SCons.Platform.posix
needs_shell = re.compile('["\\'><!^&]')
def gyp_spawn(sh, escape, cmd, args, env):
def strip_scons_quotes(arg):
if arg[0] == '"' and arg[-1] == '"':
return arg[1:-1]
return arg
stripped_args = [strip_scons_quotes(a) for a in args]
if needs_shell.search(' '.join(stripped_args)):
return SCons.Platform.posix.exec_spawnvpe([sh, '-c', ' '.join(args)], env)
else:
return SCons.Platform.posix.exec_spawnvpe(stripped_args, env)
"""
# Matches NAME="value" style definitions whose value is wrapped in quotes.
escape_quotes_re = re.compile(r'^([^=]*=)"([^"]*)"$')

def escape_quotes(s):
    """Backslash-escape the quotes in a NAME="value" define; other strings
    are returned unchanged."""
    return escape_quotes_re.sub(r'\1\"\2\"', s)
def GenerateConfig(fp, config, indent='', src_dir=''):
  """
  Generates SCons dictionary items for a gyp configuration.

  This provides the main translation between the (lower-case) gyp settings
  keywords and the (upper-case) SCons construction variables.

  Arguments:
    fp: open file the generated assignments are written to
    config: gyp configuration dict (keys like 'cflags', 'defines', ...)
    indent: leading whitespace prefix for each emitted line
    src_dir: source directory prefix applied to relative include_dirs
  """
  var_mapping = {
      'ASFLAGS' : 'asflags',
      'CCFLAGS' : 'cflags',
      'CFLAGS' : 'cflags_c',
      'CXXFLAGS' : 'cflags_cc',
      'CPPDEFINES' : 'defines',
      'CPPPATH' : 'include_dirs',
      # Add the ldflags value to $LINKFLAGS, but not $SHLINKFLAGS.
      # SCons defines $SHLINKFLAGS to incorporate $LINKFLAGS, so
      # listing both here would cause 'ldflags' to get appended to
      # both, and then have it show up twice on the command line.
      'LINKFLAGS' : 'ldflags',
  }
  postamble='\n%s],\n' % indent
  # Emit in sorted order so the generated file is deterministic.
  for scons_var in sorted(var_mapping.keys()):
    gyp_var = var_mapping[scons_var]
    value = config.get(gyp_var)
    if value:
      if gyp_var in ('defines',):
        value = [escape_quotes(v) for v in value]
      if gyp_var in ('include_dirs',):
        if src_dir and not src_dir.endswith('/'):
          src_dir += '/'
        result = []
        for v in value:
          v = FixPath(v, src_dir)
          # Force SCons to evaluate the CPPPATH directories at
          # SConscript-read time, so delayed evaluation of $SRC_DIR
          # doesn't point it to the --generator-output= directory.
          result.append('env.Dir(%r)' % v)
        value = result
      else:
        value = map(repr, value)
      WriteList(fp,
                value,
                prefix=indent,
                preamble='%s%s = [\n    ' % (indent, scons_var),
                postamble=postamble)
def GenerateSConscript(output_filename, spec, build_file, build_file_data):
  """
  Generates a SConscript file for a specific target.

  This generates a SConscript file suitable for building any or all of
  the target's configurations.

  A SConscript file may be called multiple times to generate targets for
  multiple configurations.  Consequently, it needs to be ready to build
  the target for any requested configuration, and therefore contains
  information about the settings for all configurations (generated into
  the SConscript file at gyp configuration time) as well as logic for
  selecting (at SCons build time) the specific configuration being built.

  The general outline of a generated SConscript file is:

    --  Header

    --  Import 'env'.  This contains a $CONFIG_NAME construction
        variable that specifies what configuration to build
        (e.g. Debug, Release).

    --  Configurations.  This is a dictionary with settings for
        the different configurations (Debug, Release) under which this
        target can be built.  The values in the dictionary are themselves
        dictionaries specifying what construction variables should added
        to the local copy of the imported construction environment
        (Append), should be removed (FilterOut), and should outright
        replace the imported values (Replace).

    --  Clone the imported construction environment and update
        with the proper configuration settings.

    --  Initialize the lists of the targets' input files and prerequisites.

    --  Target-specific actions and rules.  These come after the
        input file and prerequisite initializations because the
        outputs of the actions and rules may affect the input file
        list (process_outputs_as_sources) and get added to the list of
        prerequisites (so that they're guaranteed to be executed before
        building the target).

    --  Call the Builder for the target itself.

    --  Arrange for any copies to be made into installation directories.

    --  Set up the {name} Alias (phony Node) for the target as the
        primary handle for building all of the target's pieces.

    --  Use env.Require() to make sure the prerequisites (explicitly
        specified, but also including the actions and rules) are built
        before the target itself.

    --  Return the {name} Alias to the calling SConstruct file
        so it can be added to the list of default targets.

  Arguments:
    output_filename: path of the .scons file to write.
    spec: the gyp target dictionary for this target.
    build_file: path of the .gyp file that defined this target.
    build_file_data: loaded contents of build_file (supplies '_DEPTH').
  """
  scons_target = SCons.Target(spec)

  # Directory that will hold the generated SConscript.
  gyp_dir = os.path.dirname(output_filename)
  if not gyp_dir:
    gyp_dir = '.'
  gyp_dir = os.path.abspath(gyp_dir)

  output_dir = os.path.dirname(output_filename)
  src_dir = build_file_data['_DEPTH']
  src_dir_rel = gyp.common.RelativePath(src_dir, output_dir)
  # Subdirectory of the source tree containing this target's .gyp file,
  # expressed relative to the $SRC_DIR construction variable.
  subdir = gyp.common.RelativePath(os.path.dirname(build_file), src_dir)
  src_subdir = '$SRC_DIR/' + subdir
  src_subdir_ = src_subdir + '/'

  component_name = os.path.splitext(os.path.basename(build_file))[0]
  target_name = spec['target_name']

  if not os.path.exists(gyp_dir):
    os.makedirs(gyp_dir)
  fp = open(output_filename, 'w')
  fp.write(header)

  fp.write('\nimport os\n')
  fp.write('\nImport("env")\n')

  # Clone the environment with variables identifying this target; these are
  # used in e.g. $INTERMEDIATE_DIR expansions.
  fp.write('\n')
  fp.write('env = env.Clone(COMPONENT_NAME=%s,\n' % repr(component_name))
  fp.write('                TARGET_NAME=%s)\n' % repr(target_name))

  # Emit the alternate spawn function once if any configuration needs it.
  for config in spec['configurations'].itervalues():
    if config.get('scons_line_length'):
      fp.write(_spawn_hack)
      break

  # Write the per-configuration settings dictionary.  'indent' matches the
  # nesting depth of the values inside each Append/Replace dict() literal.
  indent = ' ' * 12
  fp.write('\n')
  fp.write('configurations = {\n')
  for config_name, config in spec['configurations'].iteritems():
    fp.write('    \'%s\' : {\n' % config_name)

    fp.write('        \'Append\' : dict(\n')
    GenerateConfig(fp, config, indent, src_subdir)
    libraries = spec.get('libraries')
    if libraries:
      WriteList(fp,
                map(repr, libraries),
                prefix=indent,
                preamble='%sLIBS = [\n    ' % indent,
                postamble='\n%s],\n' % indent)
    fp.write('        ),\n')

    fp.write('        \'FilterOut\' : dict(\n' )
    for key, var in config.get('scons_remove', {}).iteritems():
      fp.write('             %s = %s,\n' % (key, repr(var)))
    fp.write('        ),\n')

    fp.write('        \'Replace\' : dict(\n' )
    scons_settings = config.get('scons_variable_settings', {})
    for key in sorted(scons_settings.keys()):
      val = pprint.pformat(scons_settings[key])
      fp.write('             %s = %s,\n' % (key, val))
    # Link C++ targets with the C++ driver rather than the C one.
    if 'c++' in spec.get('link_languages', []):
      fp.write('             %s = %s,\n' % ('LINK', repr('$CXX')))
    if config.get('scons_line_length'):
      fp.write('             SPAWN = gyp_spawn,\n')
    fp.write('        ),\n')

    fp.write('        \'ImportExternal\' : [\n' )
    for var in config.get('scons_import_variables', []):
      fp.write('             %s,\n' % repr(var))
    fp.write('        ],\n')

    fp.write('        \'PropagateExternal\' : [\n' )
    for var in config.get('scons_propagate_variables', []):
      fp.write('             %s,\n' % repr(var))
    fp.write('        ],\n')

    fp.write('    },\n')
  fp.write('}\n')

  # Select the requested configuration and apply its settings.
  fp.write('\n'
           'config = configurations[env[\'CONFIG_NAME\']]\n'
           'env.Append(**config[\'Append\'])\n'
           'env.FilterOut(**config[\'FilterOut\'])\n'
           'env.Replace(**config[\'Replace\'])\n')

  fp.write('\n'
           '# Scons forces -fPIC for SHCCFLAGS on some platforms.\n'
           '# Disable that so we can control it from cflags in gyp.\n'
           '# Note that Scons itself is inconsistent with its -fPIC\n'
           '# setting. SHCCFLAGS forces -fPIC, and SHCFLAGS does not.\n'
           '# This will make SHCCFLAGS consistent with SHCFLAGS.\n'
           'env[\'SHCCFLAGS\'] = [\'$CCFLAGS\']\n')

  # Pull externally-supplied variables from the command line or the OS
  # environment into the construction environment (ImportExternal) or the
  # subprocess environment (PropagateExternal).
  fp.write('\n'
           'for _var in config[\'ImportExternal\']:\n'
           '  if _var in ARGUMENTS:\n'
           '    env[_var] = ARGUMENTS[_var]\n'
           '  elif _var in os.environ:\n'
           '    env[_var] = os.environ[_var]\n'
           'for _var in config[\'PropagateExternal\']:\n'
           '  if _var in ARGUMENTS:\n'
           '    env[_var] = ARGUMENTS[_var]\n'
           '  elif _var in os.environ:\n'
           '    env[\'ENV\'][_var] = os.environ[_var]\n')

  fp.write('\n'
           "env['ENV']['LD_LIBRARY_PATH'] = env.subst('$LIB_DIR')\n")

  #fp.write("\nif env.has_key('CPPPATH'):\n")
  #fp.write("  env['CPPPATH'] = map(env.Dir, env['CPPPATH'])\n")

  # Optional variant settings, enabled from the command line by
  # VARIANTNAME=1 (anything other than unset or '0').
  variants = spec.get('variants', {})
  for setting in sorted(variants.keys()):
    if_fmt = 'if ARGUMENTS.get(%s) not in (None, \'0\'):\n'
    fp.write('\n')
    fp.write(if_fmt % repr(setting.upper()))
    fp.write('  env.AppendUnique(\n')
    GenerateConfig(fp, variants[setting], indent, src_subdir)
    fp.write('  )\n')

  scons_target.write_input_files(fp)

  fp.write('\n')
  fp.write('target_files = []\n')
  prerequisites = spec.get('scons_prerequisites', [])
  fp.write('prerequisites = %s\n' % pprint.pformat(prerequisites))

  # Emit the target's one-off actions.  Actions without outputs become
  # phony Aliases; actions with outputs become Commands.
  actions = spec.get('actions', [])
  for action in actions:
    a = ['cd', src_subdir, '&&'] + action['action']
    message = action.get('message')
    if message:
      message = repr(message)
    inputs = [FixPath(f, src_subdir_) for f in action.get('inputs', [])]
    outputs = [FixPath(f, src_subdir_) for f in action.get('outputs', [])]
    if outputs:
      template = _command_template
    else:
      template = _alias_template
    fp.write(template % {
        'inputs' : pprint.pformat(inputs),
        'outputs' : pprint.pformat(outputs),
        'action' : pprint.pformat(a),
        'message' : message,
        'target_name': target_name,
    })
    if int(action.get('process_outputs_as_sources', 0)):
      fp.write('input_files.extend(_outputs)\n')
    fp.write('prerequisites.extend(_outputs)\n')
    fp.write('target_files.extend(_outputs)\n')

  # Emit the target's per-extension rules.
  rules = spec.get('rules', [])
  for rule in rules:
    name = rule['rule_name']
    a = ['cd', src_subdir, '&&'] + rule['action']
    message = rule.get('message')
    if message:
      message = repr(message)
    if int(rule.get('process_outputs_as_sources', 0)):
      poas_line = '_processed_input_files.extend(_generated)'
    else:
      poas_line = '_processed_input_files.append(infile)'
    inputs = [FixPath(f, src_subdir_) for f in rule.get('inputs', [])]
    outputs = [FixPath(f, src_subdir_) for f in rule.get('outputs', [])]
    fp.write(_rule_template % {
        'inputs' : pprint.pformat(inputs),
        'outputs' : pprint.pformat(outputs),
        'action' : pprint.pformat(a),
        'extension' : rule['extension'],
        'name' : name,
        'message' : message,
        'process_outputs_as_sources_line' : poas_line,
        'src_dir' : src_subdir_,
    })

  # The Builder call for the target itself.
  scons_target.write_target(fp, src_subdir)

  # Emit Commands for the 'copies' (install) steps, one per copied file.
  copies = spec.get('copies', [])
  if copies:
    fp.write(_copy_action_template)
  for copy in copies:
    destdir = None
    files = None
    try:
      destdir = copy['destination']
    except KeyError, e:
      gyp.common.ExceptionAppend(
        e,
        "Required 'destination' key missing for 'copies' in %s." % build_file)
      raise
    try:
      files = copy['files']
    except KeyError, e:
      gyp.common.ExceptionAppend(
        e, "Required 'files' key missing for 'copies' in %s." % build_file)
      raise
    if not files:
      # TODO:  should probably add a (suppressible) warning;
      # a null file list may be unintentional.
      continue
    if not destdir:
      raise Exception(
        "Required 'destination' key is empty for 'copies' in %s." % build_file)

    fmt = ('\n'
           '_outputs = env.Command(%s,\n'
           '                       %s,\n'
           '                       GYPCopy(\'$TARGET\', \'$SOURCE\'))\n')
    for f in copy['files']:
      dest = os.path.join(destdir, os.path.basename(f))
      f = FixPath(f, src_subdir_)
      dest = FixPath(dest, src_subdir_)
      fp.write(fmt % (repr(dest), repr(f)))
      fp.write('target_files.extend(_outputs)\n')

  # Set up the command for running the target, either as configured by
  # 'run_as' or as the default gtest invocation for 'test' targets.
  if spec.get('run_as') or int(spec.get('test', 0)):
    run_as = spec.get('run_as', {
        'action' : ['$TARGET_NAME', '--gtest_print_time'],
    })
    action = run_as.get('action', [])
    working_directory = run_as.get('working_directory')
    if not working_directory:
      working_directory = gyp_dir
    else:
      if not os.path.isabs(working_directory):
        working_directory = os.path.normpath(os.path.join(gyp_dir,
                                                          working_directory))
    if run_as.get('environment'):
      for (key, val) in run_as.get('environment').iteritems():
        action = ['%s="%s"' % (key, val)] + action
    action = ['cd', '"%s"' % working_directory, '&&'] + action
    fp.write(_run_as_template % {
        'action' : pprint.pformat(action),
        'message' : run_as.get('message', ''),
    })

  # The Alias that serves as the primary handle for this target.
  fmt = "\ngyp_target = env.Alias('%s', target_files)\n"
  fp.write(fmt % target_name)

  # Make sure the target, and everything it depends on getting built first,
  # requires its dependencies and prerequisites.
  dependencies = spec.get('scons_dependencies', [])
  if dependencies:
    WriteList(fp, dependencies, preamble='dependencies = [\n    ',
                                postamble='\n]\n')
    fp.write('env.Requires(target_files, dependencies)\n')
    fp.write('env.Requires(gyp_target, dependencies)\n')
    fp.write('for prerequisite in prerequisites:\n')
    fp.write('  env.Requires(prerequisite, dependencies)\n')
  fp.write('env.Requires(gyp_target, prerequisites)\n')

  if spec.get('run_as', 0) or int(spec.get('test', 0)):
    fp.write(_run_as_template_suffix % {
        'target_name': target_name,
    })

  fp.write('Return("gyp_target")\n')

  fp.close()
#############################################################################
# TEMPLATE BEGIN
_wrapper_template = """\
__doc__ = '''
Wrapper configuration for building this entire "solution,"
including all the specific targets in various *.scons files.
'''
import os
import sys
import SCons.Environment
import SCons.Util
def GetProcessorCount():
'''
Detects the number of CPUs on the system. Adapted form:
http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
'''
# Linux, Unix and Mac OS X:
if hasattr(os, 'sysconf'):
if os.sysconf_names.has_key('SC_NPROCESSORS_ONLN'):
# Linux and Unix or Mac OS X with python >= 2.5:
return os.sysconf('SC_NPROCESSORS_ONLN')
else: # Mac OS X with Python < 2.5:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key('NUMBER_OF_PROCESSORS'):
return max(int(os.environ.get('NUMBER_OF_PROCESSORS', '1')), 1)
return 1 # Default
# Support PROGRESS= to show progress in different ways.
p = ARGUMENTS.get('PROGRESS')
if p == 'spinner':
Progress(['/\\r', '|\\r', '\\\\\\r', '-\\r'],
interval=5,
file=open('/dev/tty', 'w'))
elif p == 'name':
Progress('$TARGET\\r', overwrite=True, file=open('/dev/tty', 'w'))
# Set the default -j value based on the number of processors.
SetOption('num_jobs', GetProcessorCount() + 1)
# Have SCons use its cached dependency information.
SetOption('implicit_cache', 1)
# Only re-calculate MD5 checksums if a timestamp has changed.
Decider('MD5-timestamp')
# Since we set the -j value by default, suppress SCons warnings about being
# unable to support parallel build on versions of Python with no threading.
default_warnings = ['no-no-parallel-support']
SetOption('warn', default_warnings + GetOption('warn'))
AddOption('--mode', nargs=1, dest='conf_list', default=[],
action='append', help='Configuration to build.')
AddOption('--verbose', dest='verbose', default=False,
action='store_true', help='Verbose command-line output.')
#
sconscript_file_map = %(sconscript_files)s
class LoadTarget:
'''
Class for deciding if a given target sconscript is to be included
based on a list of included target names, optionally prefixed with '-'
to exclude a target name.
'''
def __init__(self, load):
'''
Initialize a class with a list of names for possible loading.
Arguments:
load: list of elements in the LOAD= specification
'''
self.included = set([c for c in load if not c.startswith('-')])
self.excluded = set([c[1:] for c in load if c.startswith('-')])
if not self.included:
self.included = set(['all'])
def __call__(self, target):
'''
Returns True if the specified target's sconscript file should be
loaded, based on the initialized included and excluded lists.
'''
return (target in self.included or
('all' in self.included and not target in self.excluded))
if 'LOAD' in ARGUMENTS:
load = ARGUMENTS['LOAD'].split(',')
else:
load = []
load_target = LoadTarget(load)
sconscript_files = []
for target, sconscript in sconscript_file_map.iteritems():
if load_target(target):
sconscript_files.append(sconscript)
target_alias_list= []
conf_list = GetOption('conf_list')
if conf_list:
# In case the same --mode= value was specified multiple times.
conf_list = list(set(conf_list))
else:
conf_list = [%(default_configuration)r]
sconsbuild_dir = Dir(%(sconsbuild_dir)s)
def FilterOut(self, **kw):
kw = SCons.Environment.copy_non_reserved_keywords(kw)
for key, val in kw.items():
envval = self.get(key, None)
if envval is None:
# No existing variable in the environment, so nothing to delete.
continue
for vremove in val:
# Use while not if, so we can handle duplicates.
while vremove in envval:
envval.remove(vremove)
self[key] = envval
# TODO(sgk): SCons.Environment.Append() has much more logic to deal
# with various types of values. We should handle all those cases in here
# too. (If variable is a dict, etc.)
non_compilable_suffixes = {
'LINUX' : set([
'.bdic',
'.css',
'.dat',
'.fragment',
'.gperf',
'.h',
'.hh',
'.hpp',
'.html',
'.hxx',
'.idl',
'.in',
'.in0',
'.in1',
'.js',
'.mk',
'.rc',
'.sigs',
'',
]),
'WINDOWS' : set([
'.h',
'.hh',
'.hpp',
'.dat',
'.idl',
'.in',
'.in0',
'.in1',
]),
}
def compilable(env, file):
base, ext = os.path.splitext(str(file))
if ext in non_compilable_suffixes[env['TARGET_PLATFORM']]:
return False
return True
def compilable_files(env, sources):
return [x for x in sources if compilable(env, x)]
def GypProgram(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Program('$TOP_BUILDDIR/' + str(target), source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(result)
return result
def GypTestProgram(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Program('$TOP_BUILDDIR/' + str(target), source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(*result)
return result
def GypLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Library('$LIB_DIR/' + str(target), source, *args, **kw)
return result
def GypLoadableModule(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.LoadableModule('$TOP_BUILDDIR/' + str(target), source, *args,
**kw)
return result
def GypStaticLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.StaticLibrary('$LIB_DIR/' + str(target), source, *args, **kw)
return result
def GypSharedLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.SharedLibrary('$LIB_DIR/' + str(target), source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(result)
return result
def add_gyp_methods(env):
env.AddMethod(GypProgram)
env.AddMethod(GypTestProgram)
env.AddMethod(GypLibrary)
env.AddMethod(GypLoadableModule)
env.AddMethod(GypStaticLibrary)
env.AddMethod(GypSharedLibrary)
env.AddMethod(FilterOut)
env.AddMethod(compilable)
base_env = Environment(
tools = %(scons_tools)s,
INTERMEDIATE_DIR='$OBJ_DIR/${COMPONENT_NAME}/_${TARGET_NAME}_intermediate',
LIB_DIR='$TOP_BUILDDIR/lib',
OBJ_DIR='$TOP_BUILDDIR/obj',
SCONSBUILD_DIR=sconsbuild_dir.abspath,
SHARED_INTERMEDIATE_DIR='$OBJ_DIR/_global_intermediate',
SRC_DIR=Dir(%(src_dir)r),
TARGET_PLATFORM='LINUX',
TOP_BUILDDIR='$SCONSBUILD_DIR/$CONFIG_NAME',
LIBPATH=['$LIB_DIR'],
)
if not GetOption('verbose'):
base_env.SetDefault(
ARCOMSTR='Creating library $TARGET',
ASCOMSTR='Assembling $TARGET',
CCCOMSTR='Compiling $TARGET',
CONCATSOURCECOMSTR='ConcatSource $TARGET',
CXXCOMSTR='Compiling $TARGET',
LDMODULECOMSTR='Building loadable module $TARGET',
LINKCOMSTR='Linking $TARGET',
MANIFESTCOMSTR='Updating manifest for $TARGET',
MIDLCOMSTR='Compiling IDL $TARGET',
PCHCOMSTR='Precompiling $TARGET',
RANLIBCOMSTR='Indexing $TARGET',
RCCOMSTR='Compiling resource $TARGET',
SHCCCOMSTR='Compiling $TARGET',
SHCXXCOMSTR='Compiling $TARGET',
SHLINKCOMSTR='Linking $TARGET',
SHMANIFESTCOMSTR='Updating manifest for $TARGET',
)
add_gyp_methods(base_env)
for conf in conf_list:
env = base_env.Clone(CONFIG_NAME=conf)
SConsignFile(env.File('$TOP_BUILDDIR/.sconsign').abspath)
for sconscript in sconscript_files:
target_alias = env.SConscript(sconscript, exports=['env'])
if target_alias:
target_alias_list.extend(target_alias)
Default(Alias('all', target_alias_list))
help_fmt = '''
Usage: hammer [SCONS_OPTIONS] [VARIABLES] [TARGET] ...
Local command-line build options:
--mode=CONFIG Configuration to build:
--mode=Debug [default]
--mode=Release
--verbose Print actual executed command lines.
Supported command-line build variables:
LOAD=[module,...] Comma-separated list of components to load in the
dependency graph ('-' prefix excludes)
PROGRESS=type Display a progress indicator:
name: print each evaluated target name
spinner: print a spinner every 5 targets
The following TARGET names can also be used as LOAD= module names:
%%s
'''
if GetOption('help'):
def columnar_text(items, width=78, indent=2, sep=2):
result = []
colwidth = max(map(len, items)) + sep
cols = (width - indent) / colwidth
if cols < 1:
cols = 1
rows = (len(items) + cols - 1) / cols
indent = '%%*s' %% (indent, '')
sep = indent
for row in xrange(0, rows):
result.append(sep)
for i in xrange(row, len(items), rows):
result.append('%%-*s' %% (colwidth, items[i]))
sep = '\\n' + indent
result.append('\\n')
return ''.join(result)
load_list = set(sconscript_file_map.keys())
target_aliases = set(map(str, target_alias_list))
common = load_list and target_aliases
load_only = load_list - common
target_only = target_aliases - common
help_text = [help_fmt %% columnar_text(sorted(list(common)))]
if target_only:
fmt = "The following are additional TARGET names:\\n\\n%%s\\n"
help_text.append(fmt %% columnar_text(sorted(list(target_only))))
if load_only:
fmt = "The following are additional LOAD= module names:\\n\\n%%s\\n"
help_text.append(fmt %% columnar_text(sorted(list(load_only))))
Help(''.join(help_text))
"""
# TEMPLATE END
#############################################################################
def GenerateSConscriptWrapper(build_file, build_file_data, name,
                              output_filename, sconscript_files,
                              default_configuration):
  """
  Generates the "wrapper" SConscript file (analogous to the Visual Studio
  solution) that calls all the individual target SConscript files.

  Arguments:
    build_file: path of the .gyp file this wrapper corresponds to.
    build_file_data: loaded contents of build_file ('_DEPTH',
        'scons_settings').
    name: solution name substituted into the template.
    output_filename: path of the wrapper SConscript to write.
    sconscript_files: dict mapping target name -> per-target SConscript path.
    default_configuration: configuration built when --mode= is not given.
  """
  output_dir = os.path.dirname(output_filename)
  # Relative path from the wrapper's directory back to the source root.
  src_dir_rel = gyp.common.RelativePath(build_file_data['_DEPTH'], output_dir)
  if not src_dir_rel:
    src_dir_rel = '.'
  scons_settings = build_file_data.get('scons_settings', {})

  # Render the target->sconscript map as a dict(...) literal, one entry per
  # line, sorted so the generated file is deterministic.
  entry_lines = ['    %s = %r,' % (target, sconscript_files[target])
                 for target in sorted(sconscript_files.keys())]
  sconscript_file_lines = ['dict('] + entry_lines + [')']

  fp = open(output_filename, 'w')
  fp.write(header)
  fp.write(_wrapper_template % {
      'default_configuration' : default_configuration,
      'name' : name,
      'scons_tools' : repr(scons_settings.get('tools', ['default'])),
      'sconsbuild_dir' : repr(scons_settings.get('sconsbuild_dir', '#')),
      'sconscript_files' : '\n'.join(sconscript_file_lines),
      'src_dir' : src_dir_rel,
  })
  fp.close()

  # Generate the SConstruct file that invokes the wrapper SConscript.
  wrapper_dir, wrapper_fname = os.path.split(output_filename)
  fp = open(os.path.join(wrapper_dir, 'SConstruct'), 'w')
  fp.write(header)
  fp.write('SConscript(%s)\n' % repr(wrapper_fname))
  fp.close()
def TargetFilename(target, build_file=None, output_suffix=''):
  """Returns the .scons file name for the specified target.

  If build_file is None, target is taken to be a fully-qualified gyp target
  name and the build file is parsed out of it.
  """
  if build_file is None:
    build_file, target = gyp.common.ParseQualifiedTarget(target)[:2]
  basename = target + output_suffix + '.scons'
  return os.path.join(os.path.dirname(build_file), basename)
def GenerateOutput(target_list, target_dicts, data, params):
  """
  Generates all the output files for the specified targets.

  Writes one per-target .scons file for every non-ignored target, then one
  wrapper "_main" SConscript (plus SConstruct) per .gyp build file.

  Arguments:
    target_list: list of fully-qualified target names.
    target_dicts: dict mapping qualified target name -> gyp target spec.
    data: dict mapping build-file path -> loaded build-file contents.
    params: generator parameters ('options', 'cwd').
  """
  options = params['options']

  # When --generator-output= is given, map paths under the invocation
  # directory into the generator-output directory instead.
  if options.generator_output:
    def output_path(filename):
      return filename.replace(params['cwd'], options.generator_output)
  else:
    def output_path(filename):
      return filename

  default_configuration = None

  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in scons build (target %s)' %
          qualified_target)
    scons_target = SCons.Target(spec)
    if scons_target.is_ignored:
      continue

    # TODO: assumes the default_configuration of the first target
    # non-Default target is the correct default for all targets.
    # Need a better model for handle variation between targets.
    if (not default_configuration and
        spec['default_configuration'] != 'Default'):
      default_configuration = spec['default_configuration']

    build_file, target = gyp.common.ParseQualifiedTarget(qualified_target)[:2]
    output_file = TargetFilename(target, build_file, options.suffix)
    if options.generator_output:
      output_file = output_path(output_file)

    if not spec.has_key('libraries'):
      spec['libraries'] = []

    # Add dependent static library targets to the 'libraries' value.
    deps = spec.get('dependencies', [])
    spec['scons_dependencies'] = []
    for d in deps:
      td = target_dicts[d]
      target_name = td['target_name']
      spec['scons_dependencies'].append("Alias('%s')" % target_name)
      if td['type'] in ('static_library', 'shared_library'):
        libname = td.get('product_name', target_name)
        spec['libraries'].append(libname)
      if td['type'] == 'loadable_module':
        # Loadable modules aren't linked against, but must be built before
        # this target, so record the module's file name as a prerequisite.
        prereqs = spec.get('scons_prerequisites', [])
        # TODO: parameterize with <(SHARED_LIBRARY_*) variables?
        td_target = SCons.Target(td)
        td_target.target_prefix = '${SHLIBPREFIX}'
        td_target.target_suffix = '${SHLIBSUFFIX}'
        prereqs.append(td_target.full_product_name())
        spec['scons_prerequisites'] = prereqs

    GenerateSConscript(output_file, spec, build_file, data[build_file])

  if not default_configuration:
    default_configuration = 'Default'

  # Emit one wrapper SConscript (and SConstruct) per .gyp build file,
  # referencing every non-ignored target that build file reaches.
  for build_file in sorted(data.keys()):
    path, ext = os.path.splitext(build_file)
    if ext != '.gyp':
      continue
    output_dir, basename = os.path.split(path)
    output_filename = path + '_main' + options.suffix + '.scons'

    all_targets = gyp.common.AllTargets(target_list, target_dicts, build_file)
    sconscript_files = {}
    for t in all_targets:
      scons_target = SCons.Target(target_dicts[t])
      if scons_target.is_ignored:
        continue
      bf, target = gyp.common.ParseQualifiedTarget(t)[:2]
      target_filename = TargetFilename(target, bf, options.suffix)
      tpath = gyp.common.RelativePath(target_filename, output_dir)
      sconscript_files[target] = tpath

    output_filename = output_path(output_filename)
    if sconscript_files:
      GenerateSConscriptWrapper(build_file, data[build_file], basename,
                                output_filename, sconscript_files,
                                default_configuration)
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.