repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
EliotBerriot/django | django/conf/locale/zh_Hant/formats.py | 1008 | 1810 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats, in Django date-format syntax (examples in trailing comments).
DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
TIME_FORMAT = 'H:i'  # 20:45
DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
YEAR_MONTH_FORMAT = 'Y年n月'  # 2016年9月
MONTH_DAY_FORMAT = 'm月j日'  # 9月5日
# The "short" formats intentionally match the long ones for this locale.
SHORT_DATE_FORMAT = 'Y年n月j日'  # 2016年9月5日
SHORT_DATETIME_FORMAT = 'Y年n月j日 H:i'  # 2016年9月5日 20:45
FIRST_DAY_OF_WEEK = 1  # 星期一 (Monday)
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Input formats are tried in order when parsing user-entered values.
DATE_INPUT_FORMATS = [
    '%Y/%m/%d',  # '2016/09/05'
    '%Y-%m-%d',  # '2016-09-05'
    # NOTE(review): %n and %j are not valid strftime directives, so this
    # pattern can never match user input -- confirm against upstream Django.
    '%Y年%n月%j日',  # '2016年9月5日'
]
TIME_INPUT_FORMATS = [
    '%H:%M',  # '20:45'
    '%H:%M:%S',  # '20:45:29'
    '%H:%M:%S.%f',  # '20:45:29.000200'
]
# Input formats are tried in order when parsing user-entered values.
# NOTE(review): %n and %j are not valid strftime directives, so the
# CJK-style patterns can never match -- kept for parity with the other lists.
DATETIME_INPUT_FORMATS = [
    '%Y/%m/%d %H:%M',  # '2016/09/05 20:45'
    '%Y-%m-%d %H:%M',  # '2016-09-05 20:45'
    '%Y年%n月%j日 %H:%M',  # '2016年9月5日 20:45'
    '%Y/%m/%d %H:%M:%S',  # '2016/09/05 20:45:29'
    '%Y-%m-%d %H:%M:%S',  # '2016-09-05 20:45:29'
    '%Y年%n月%j日 %H:%M:%S',  # '2016年9月5日 20:45:29'
    '%Y/%m/%d %H:%M:%S.%f',  # '2016/09/05 20:45:29.000200'
    '%Y-%m-%d %H:%M:%S.%f',  # '2016-09-05 20:45:29.000200'
    # Fixed: the minute field was mistyped as %n ('%H:%n:%S.%f').
    '%Y年%n月%j日 %H:%M:%S.%f',  # '2016年9月5日 20:45:29.000200'
]
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ''  # no grouping separator is rendered
NUMBER_GROUPING = 4  # digits grouped in fours (myriad-based CJK numbering)
| bsd-3-clause |
PongPi/isl-odoo | openerp/addons/base/module/wizard/base_module_upgrade.py | 294 | 5164 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from openerp.tools.translate import _
class base_module_upgrade(osv.osv_memory):
    """ Module Upgrade

    Transient wizard that applies (or cancels) all pending module
    install/upgrade/removal operations in one go.
    """
    _name = "base.module.upgrade"
    _description = "Module Upgrade"

    _columns = {
        # Read-only text summary of the modules that will be acted upon.
        'module_info': fields.text('Modules to Update',readonly=True),
    }

    def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
        """Replace the wizard form with an "Upgrade Completed" screen when
        no module remains scheduled for install/upgrade/removal."""
        res = super(base_module_upgrade, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
        if view_type != 'form':
            return res

        context = {} if context is None else context
        record_id = context and context.get('active_id', False) or False
        active_model = context.get('active_model')

        # Only rewrite the view when launched from a concrete record.
        if (not record_id) or (not active_model):
            return res

        ids = self.get_module_list(cr, uid, context=context)
        if not ids:
            # Nothing left to do: show a completion dialog instead.
            res['arch'] = '''<form string="Upgrade Completed" version="7.0">
                                <separator string="Upgrade Completed" colspan="4"/>
                                <footer>
                                    <button name="config" string="Start Configuration" type="object" class="oe_highlight"/> or
                                    <button special="cancel" string="Close" class="oe_link"/>
                                </footer>
                             </form>'''
        return res

    def get_module_list(self, cr, uid, context=None):
        """Return ids of modules scheduled for upgrade, removal or install."""
        mod_obj = self.pool.get('ir.module.module')
        ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove', 'to install'])])
        return ids

    def default_get(self, cr, uid, fields, context=None):
        """Pre-fill 'module_info' with one "name : state" line per module."""
        mod_obj = self.pool.get('ir.module.module')
        ids = self.get_module_list(cr, uid, context=context)
        res = mod_obj.read(cr, uid, ids, ['name','state'], context)
        return {'module_info': '\n'.join(map(lambda x: x['name']+' : '+x['state'], res))}

    def upgrade_module_cancel(self, cr, uid, ids, context=None):
        """Revert all scheduled actions: pending upgrades/removals go back
        to 'installed', pending installs go back to 'uninstalled'."""
        mod_obj = self.pool.get('ir.module.module')
        to_installed_ids = mod_obj.search(cr, uid, [
            ('state', 'in', ['to upgrade', 'to remove'])])
        if to_installed_ids:
            mod_obj.write(cr, uid, to_installed_ids, {'state': 'installed'}, context=context)

        to_uninstalled_ids = mod_obj.search(cr, uid, [
            ('state', '=', 'to install')])
        if to_uninstalled_ids:
            mod_obj.write(cr, uid, to_uninstalled_ids, {'state': 'uninstalled'}, context=context)
        return {'type': 'ir.actions.act_window_close'}

    def upgrade_module(self, cr, uid, ids, context=None):
        """Verify dependencies, download scheduled modules, then rebuild the
        registry so pending install/upgrade operations are applied."""
        ir_module = self.pool.get('ir.module.module')

        # install/upgrade: double-check preconditions
        ids = ir_module.search(cr, uid, [('state', 'in', ['to upgrade', 'to install'])])
        if ids:
            # Find dependencies that are unknown (NULL) or still uninstalled.
            cr.execute("""SELECT d.name FROM ir_module_module m
                          JOIN ir_module_module_dependency d ON (m.id = d.module_id)
                          LEFT JOIN ir_module_module m2 ON (d.name = m2.name)
                          WHERE m.id in %s and (m2.state IS NULL or m2.state IN %s)""",
                       (tuple(ids), ('uninstalled',)))
            unmet_packages = [x[0] for x in cr.fetchall()]
            if unmet_packages:
                raise osv.except_osv(_('Unmet Dependency!'),
                                     _('Following modules are not installed or unknown: %s') % ('\n\n' + '\n'.join(unmet_packages)))

            ir_module.download(cr, uid, ids, context=context)

        cr.commit() # save before re-creating cursor below
        openerp.api.Environment.reset()
        # Rebuilding the registry with update_module=True performs the
        # actual install/upgrade work.
        openerp.modules.registry.RegistryManager.new(cr.dbname, update_module=True)
        return {'type': 'ir.actions.act_window_close'}

    def config(self, cr, uid, ids, context=None):
        """Chain into the next pending configuration wizard."""
        return self.pool.get('res.config').next(cr, uid, [], context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
tjma12/pycbc | pycbc/types/array_cuda.py | 12 | 11741 | # Copyright (C) 2012 Alex Nitz
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# =============================================================================
#
# Preamble
#
# =============================================================================
#
"""Pycuda based
"""
import pycuda.driver
from pycuda.elementwise import ElementwiseKernel
from pycuda.reduction import ReductionKernel
from pycuda.tools import get_or_register_dtype
from pycuda.tools import context_dependent_memoize
from pycuda.tools import dtype_to_ctype
from pytools import match_precision
from pycuda.gpuarray import _get_common_dtype, empty, GPUArray
import pycuda.gpuarray
from pycuda.scan import InclusiveScanKernel
import numpy as np
# CUDA preamble pulling in pycuda's complex type so scans can operate on
# complex dtypes as well as real ones.
include_complex = """
    #include <pycuda-complex.hpp>
"""

@context_dependent_memoize
def get_cumsum_kernel(dtype):
    """Return a per-CUDA-context-cached inclusive-scan (cumsum) kernel
    for the given dtype."""
    return InclusiveScanKernel(dtype, "a+b", preamble=include_complex)
def icumsum(vec):
    """Inclusive cumulative sum of a GPU array, computed on the GPU."""
    return get_cumsum_kernel(vec.dtype)(vec)
@context_dependent_memoize
def call_prepare(self, sz, allocator):
    """Compute the output buffer and launch geometry for one reduction
    stage over ``sz`` elements.

    Returns (result, block_count, seq_count, grid_size, block_size).
    """
    max_blocks = 1024
    small_seq = 4

    if sz <= self.block_size * small_seq * max_blocks:
        # Small input: fixed sequential count, scale the block count.
        chunk = small_seq * self.block_size
        block_count = (sz + chunk - 1) // chunk
        seq_count = small_seq
    else:
        # Large input: cap block count, scale the per-thread sequential work.
        block_count = max_blocks
        macroblock = block_count * self.block_size
        seq_count = (sz + macroblock - 1) // macroblock

    # A single block reduces to a scalar (0-d) output buffer.
    out_shape = () if block_count == 1 else (block_count,)
    result = empty(out_shape, self.dtype_out, allocator)

    grid_size = (block_count, 1)
    block_size = (self.block_size, 1, 1)
    return result, block_count, seq_count, grid_size, block_size
class LowerLatencyReductionKernel(ReductionKernel):
    """ReductionKernel variant that bypasses pycuda's per-call argument
    handling to shave kernel-launch latency."""

    def __init__(self, dtype_out,
                 neutral, reduce_expr, map_expr=None, arguments=None,
                 name="reduce_kernel", keep=False, options=None, preamble=""):
        ReductionKernel.__init__(self, dtype_out,
                                 neutral, reduce_expr, map_expr, arguments,
                                 name, keep, options, preamble)
        # Shared memory per block: one accumulator slot per thread.
        self.shared_size = self.block_size * self.dtype_out.itemsize

    def __call__(self, *args, **kwargs):
        # Stage 1: map the inputs and reduce them into block_count partials.
        f = self.stage1_func
        s1_invocation_args = []
        for arg in args:
            s1_invocation_args.append(arg.gpudata)
        sz = args[0].size

        result, block_count, seq_count, grid_size, block_size = call_prepare(self, sz, args[0].allocator)

        f(grid_size, block_size, None,
          *([result.gpudata] + s1_invocation_args + [seq_count, sz]),
          shared_size=self.shared_size)

        # Stage 2: keep reducing the partial results until one value remains.
        # NOTE(review): stage2 runs at least once even when stage1 already
        # produced a single block -- presumably intentional; confirm against
        # pycuda's ReductionKernel before changing.
        while True:
            f = self.stage2_func
            sz = result.size
            result2 = result
            result, block_count, seq_count, grid_size, block_size = call_prepare(self, sz, args[0].allocator)

            f(grid_size, block_size, None,
              *([result.gpudata, result2.gpudata] + s1_invocation_args + [seq_count, sz]),
              shared_size=self.shared_size)

            if block_count == 1:
                return result
@context_dependent_memoize
def get_norm_kernel(dtype_x, dtype_out):
    """Return a cached elementwise kernel computing z[i] = norm(x[i])
    from dtype_x input into dtype_out output."""
    return ElementwiseKernel(
        "%(tp_x)s *x, %(tp_z)s *z" % {
            "tp_x": dtype_to_ctype(dtype_x),
            "tp_z": dtype_to_ctype(dtype_out),
        },
        "z[i] = norm(x[i])",
        "normalize")
def squared_norm(self):
    """Elementwise norm of self.data, written into a real-typed array of
    matching precision."""
    data = self.data
    out_dtype = match_precision(np.dtype('float64'), data.dtype)
    result = data._new_like_me(dtype=out_dtype)
    get_norm_kernel(data.dtype, out_dtype)(data, result)
    return result
# FIXME: Write me!
#def multiply_and_add(self, other, mult_fac):
# """
# Return other multiplied by mult_fac and with self added.
# Self will be modified in place. This requires all inputs to be of the same
# precision.
# """
@context_dependent_memoize
def get_weighted_inner_kernel(dtype_x, dtype_y, dtype_w, dtype_out):
    """Return a cached reduction kernel for sum(conj(x)*y/w) when x is
    complex, or sum(x*y/w) when x is real."""
    if dtype_x in (np.complex64, np.complex128):
        map_expression = "conj(x[i])*y[i]/w[i]"
    else:
        map_expression = "x[i]*y[i]/w[i]"

    argument_spec = "%(tp_x)s *x, %(tp_y)s *y, %(tp_w)s *w" % {
        "tp_x": dtype_to_ctype(dtype_x),
        "tp_y": dtype_to_ctype(dtype_y),
        "tp_w": dtype_to_ctype(dtype_w),
    }
    return LowerLatencyReductionKernel(dtype_out,
                                       neutral="0",
                                       arguments=argument_spec,
                                       reduce_expr="a+b",
                                       map_expr=map_expression,
                                       name="weighted_inner")
@context_dependent_memoize
def get_inner_kernel(dtype_x, dtype_y, dtype_out):
    """Return a cached reduction kernel for sum(conj(x)*y) when x is
    complex, or sum(x*y) when x is real."""
    if dtype_x in (np.complex64, np.complex128):
        map_expression = "conj(x[i])*y[i]"
    else:
        map_expression = "x[i]*y[i]"

    argument_spec = "%(tp_x)s *x, %(tp_y)s *y" % {
        "tp_x": dtype_to_ctype(dtype_x),
        "tp_y": dtype_to_ctype(dtype_y),
    }
    return LowerLatencyReductionKernel(dtype_out,
                                       neutral="0",
                                       arguments=argument_spec,
                                       reduce_expr="a+b",
                                       map_expr=map_expression,
                                       name="inner")
def inner(self, b):
    """Inner product of self and b (self conjugated when complex),
    returned as a host scalar."""
    x = self.data
    out_dtype = _get_common_dtype(x, b)
    kernel = get_inner_kernel(x.dtype, b.dtype, out_dtype)
    # .max() collapses the single-element device result to a scalar.
    return kernel(x, b).get().max()

vdot = inner
def weighted_inner(self, b, w):
    """Inner product of self and b weighted by 1/w; falls back to the
    unweighted inner product when w is None."""
    if w is None:
        return self.inner(b)

    x = self.data
    out_dtype = _get_common_dtype(x, b)
    kernel = get_weighted_inner_kernel(x.dtype, b.dtype, w.dtype, out_dtype)
    return kernel(x, b, w).get().max()
# Define PYCUDA MAXLOC for both single and double precission ##################
maxloc_preamble = """
struct MAXLOCN{
TTYPE max;
LTYPE loc;
__device__
MAXLOCN(){}
__device__
MAXLOCN(MAXLOCN const &src): max(src.max), loc(src.loc){}
__device__
MAXLOCN(MAXLOCN const volatile &src): max(src.max), loc(src.loc){}
__device__
MAXLOCN volatile &operator=( MAXLOCN const &src) volatile{
max = src.max;
loc = src.loc;
return *this;
}
};
__device__
MAXLOCN maxloc_red(MAXLOCN a, MAXLOCN b){
if (a.max > b.max)
return a;
else
return b;
}
__device__
MAXLOCN maxloc_start(){
MAXLOCN t;
t.max=0;
t.loc=0;
return t;
}
__device__
MAXLOCN maxloc_map(TTYPE val, LTYPE loc){
MAXLOCN t;
t.max = val;
t.loc = loc;
return t;
}
"""
maxloc_preamble_single = """
#define MAXLOCN maxlocs
#define TTYPE float
#define LTYPE int
""" + maxloc_preamble
maxloc_preamble_double = """
#define MAXLOCN maxlocd
#define TTYPE double
#define LTYPE long
""" + maxloc_preamble
maxloc_dtype_double = np.dtype([("max", np.float64), ("loc", np.int64)])
maxloc_dtype_single = np.dtype([("max", np.float32), ("loc", np.int32)])
maxloc_dtype_single = get_or_register_dtype("maxlocs", dtype=maxloc_dtype_single)
maxloc_dtype_double = get_or_register_dtype("maxlocd", dtype=maxloc_dtype_double)
mls = LowerLatencyReductionKernel(maxloc_dtype_single, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(x[i], i)",
arguments="float *x", preamble=maxloc_preamble_single)
mld = LowerLatencyReductionKernel(maxloc_dtype_double, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(x[i], i)",
arguments="double *x", preamble=maxloc_preamble_double)
max_loc_map = {'single':mls,'double':mld}
amls = LowerLatencyReductionKernel(maxloc_dtype_single, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="float *x", preamble=maxloc_preamble_single)
amld = LowerLatencyReductionKernel(maxloc_dtype_double, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="double *x", preamble=maxloc_preamble_double)
amlsc = LowerLatencyReductionKernel(maxloc_dtype_single, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="pycuda::complex<float> *x", preamble=maxloc_preamble_single)
amldc = LowerLatencyReductionKernel(maxloc_dtype_double, neutral = "maxloc_start()",
reduce_expr="maxloc_red(a, b)", map_expr="maxloc_map(abs(x[i]), i)",
arguments="pycuda::complex<double> *x", preamble=maxloc_preamble_double)
abs_max_loc_map = {'single':{ 'real':amls, 'complex':amlsc }, 'double':{ 'real':amld, 'complex':amldc }}
def zeros(length, dtype=np.float64):
    """Allocate a GPU array of `length` elements and zero-fill it."""
    result = GPUArray(length, dtype=dtype)
    # Use floor division: memset_d32 requires an integer count of 32-bit
    # words, and '/' yields a float under Python 3 (true-division) semantics.
    nwords = result.nbytes // 4
    pycuda.driver.memset_d32(result.gpudata, 0, nwords)
    return result
def ptr(self):
    """Raw device pointer of the underlying GPU buffer."""
    return self._data.ptr

def dot(self, other):
    """Dot product of self and other (no conjugation), as a host scalar."""
    return pycuda.gpuarray.dot(self._data,other).get().max()

def min(self):
    """Minimum element, returned as a host scalar.

    The trailing .max() collapses the single-element device result."""
    return pycuda.gpuarray.min(self._data).get().max()
def abs_max_loc(self):
    """Return (max |x|, index of that element) as Python scalars."""
    kernel = abs_max_loc_map[self.precision][self.kind]
    packed = kernel(self._data).get()
    return float(packed['max']), int(packed['loc'])
def cumsum(self):
    """Inclusive cumulative sum; `* 1` makes a fresh copy so the scan
    does not disturb the original data."""
    return icumsum(self.data * 1)
def max(self):
    """Maximum element, returned as a host scalar."""
    return pycuda.gpuarray.max(self._data).get().max()

def max_loc(self):
    """Return (max value, index of that element) as Python scalars."""
    maxloc = max_loc_map[self.precision](self._data)
    maxloc = maxloc.get()
    return float(maxloc['max']),int(maxloc['loc'])
def take(self, indices):
    """Gather elements at `indices`, uploading the indices to the GPU
    first when they are not already there."""
    if isinstance(indices, pycuda.gpuarray.GPUArray):
        gpu_indices = indices
    else:
        gpu_indices = pycuda.gpuarray.to_gpu(indices)
    return pycuda.gpuarray.take(self.data, gpu_indices)
def numpy(self):
    """Copy this array back to the host as a numpy array."""
    return self._data.get()
def _copy(self, self_ref, other_ref):
    """Copy other_ref's elements into self_ref (device-to-device).

    Raises:
        RuntimeError: when other_ref is longer than self_ref.
    """
    if (len(other_ref) <= len(self_ref)) :
        from pycuda.elementwise import get_copy_kernel
        func = get_copy_kernel(self.dtype, other_ref.dtype)
        func.prepared_async_call(self_ref._grid, self_ref._block, None,
                                 self_ref.gpudata, other_ref.gpudata,
                                 self_ref.mem_size)
    else:
        # Fixed error-message grammar ("must the same" -> "must be the same").
        raise RuntimeError("The arrays must be the same length")
def _getvalue(self, index):
    """Fetch one element; note this copies the whole array to the host."""
    return self._data.get()[index]

def sum(self):
    """Sum of all elements, returned as a host scalar."""
    return pycuda.gpuarray.sum(self._data).get().max()
def clear(self):
    """Zero-fill this array's device memory in place."""
    # Floor division: memset_d32 requires an integer count of 32-bit words
    # ('/' would produce a float under Python 3 semantics).
    n32 = self.data.nbytes // 4
    pycuda.driver.memset_d32(self.data.gpudata, 0, n32)
def _scheme_matches_base_array(array):
    """True when `array` is already a pycuda GPUArray."""
    return isinstance(array, pycuda.gpuarray.GPUArray)
def _copy_base_array(array):
    """Device-to-device copy of a GPUArray into a freshly allocated one."""
    data = pycuda.gpuarray.GPUArray((array.size), array.dtype)
    # memcpy_dtod on a zero-length buffer is skipped entirely.
    if len(array) > 0:
        pycuda.driver.memcpy_dtod(data.gpudata, array.gpudata, array.nbytes)
    return data

def _to_device(array):
    """Upload a host (numpy) array to the GPU."""
    return pycuda.gpuarray.to_gpu(array)
| gpl-3.0 |
jonasluz/mia-cg | pyCohenSutherland/cohen-sutherland.py | 1 | 1297 | # -*- coding: utf-8 -*-
import sys
import cs
def main():
    """Main routine.

    Reads window limits and line segments from a data file (path taken
    from the first CLI argument, defaulting to 'input.dat'), clips every
    segment with the Cohen-Sutherland algorithm and draws the results.
    """
    # Read the data file; 'with' guarantees the handle is closed (the
    # original opened the file and never closed it). Also avoids shadowing
    # the 'file' builtin.
    filename = sys.argv[1] if len(sys.argv) > 1 else 'input.dat'
    with open(filename, 'r') as datafile:
        lines = datafile.read().split("\n")

    # First line: window limits (left, right, bottom, top).
    wlims = [int(s) for s in lines[0].split(" ")]
    left, right, bottom, top = wlims[0], wlims[1], wlims[2], wlims[3]

    # Remaining lines: one segment (x1 y1 x2 y2) per line, named A, B, C...
    segments, name = {}, "A"
    for i in range(1, len(lines)-1):
        segment = [int(s) for s in lines[i].split(' ')]
        segments[name] = ( segment[0], segment[1], segment[2], segment[3] )
        name = chr(ord(name)+1)

    print('Janela: ({:}, {:}) - ({:}, {:})'.format(left, right, bottom, top))
    print('Segmentos: {}'.format(len(segments)))
    for name, segment in segments.items():
        print('{} - ({:}, {:}) - ({:}, {:})'
              .format(name, segment[0], segment[1], segment[2], segment[3]))

    # Draw the clipping window, each original segment (red) and its
    # clipped portion (blue), if any survives.
    cs.drawWindow(left, right, bottom, top)
    for name, points in segments.items():
        cs.drawLine(name, *points, color='red')
        result = cs.CohenSutherland(left, right, bottom, top, *points)
        if result:
            cs.drawLine(None, *result, linewidth=1.5, color='blue')
    cs.show()

if __name__ == "__main__":
    main()
| unlicense |
campbe13/openhatch | vendor/packages/Django/django/conf/locale/nn/formats.py | 108 | 1629 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Display formats, in Django date-format syntax.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
# ISO formats come first so they take precedence when parsing.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y',  # '2006-10-25', '25.10.2006', '25.10.06'
    # Month-name variants are disabled below (commented out in upstream).
    # '%d. %b %Y', '%d %b %Y',        # '25. okt 2006', '25 okt 2006'
    # '%d. %b. %Y', '%d %b. %Y',      # '25. okt. 2006', '25 okt. 2006'
    # '%d. %B %Y', '%d %B %Y',        # '25. oktober 2006', '25 oktober 2006'
)
# Input formats are tried in order; ISO formats first.
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',  # '2006-10-25 14:30'
    # Fixed: the '%Y-%m-%d' entry was duplicated.
    '%Y-%m-%d',  # '2006-10-25'
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
    '%d.%m.%y %H:%M:%S',  # '25.10.06 14:30:59'
    '%d.%m.%y %H:%M',  # '25.10.06 14:30'
    '%d.%m.%y',  # '25.10.06'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3  # digits grouped in threes
| agpl-3.0 |
sunils34/buffer-django-nonrel | tests/regressiontests/admin_views/customadmin.py | 52 | 1279 | """
A second, custom AdminSite -- see tests.CustomAdminSiteTests.
"""
from django.conf.urls.defaults import patterns
from django.contrib import admin
from django.http import HttpResponse
import models, forms
class Admin2(admin.AdminSite):
    """Custom AdminSite using custom templates, a custom login form, and
    an extra 'my_view' URL."""
    login_form = forms.CustomAdminAuthenticationForm
    login_template = 'custom_admin/login.html'
    logout_template = 'custom_admin/logout.html'
    index_template = 'custom_admin/index.html'
    password_change_template = 'custom_admin/password_change_form.html'
    password_change_done_template = 'custom_admin/password_change_done.html'

    def index(self, request, extra_context=None):
        # A custom index view that injects extra template context.
        custom_context = {'foo': '*bar*'}
        return super(Admin2, self).index(request, custom_context)

    def get_urls(self):
        extra_urls = patterns('',
            (r'^my_view/$', self.admin_view(self.my_view)),
        )
        return extra_urls + super(Admin2, self).get_urls()

    def my_view(self, request):
        return HttpResponse("Django is a magical pony!")
# Instantiate the custom site and register the test models on it.
site = Admin2(name="admin2")

site.register(models.Article, models.ArticleAdmin)
site.register(models.Section, inlines=[models.ArticleInline])
site.register(models.Thing, models.ThingAdmin)
site.register(models.Fabric, models.FabricAdmin)
site.register(models.ChapterXtra1, models.ChapterXtra1Admin)
| bsd-3-clause |
willgrass/pandas | pandas/stats/interface.py | 1 | 3692 | from pandas.core.api import Series
from pandas.stats.ols import OLS, MovingOLS
from pandas.stats.plm import PanelOLS, MovingPanelOLS, NonPooledPanelOLS
import pandas.stats.common as common
def ols(**kwargs):
    """Returns the appropriate OLS object depending on whether you need
    simple or panel OLS, and a full-sample or rolling/expanding OLS.

    Parameters
    ----------
    y: Series for simple OLS.  DataFrame for panel OLS.
    x: Series, DataFrame, or dict of Series for simple OLS.
       Dict of DataFrame for panel OLS.
    intercept: bool
        True if you want an intercept.  Defaults to True.
    nw_lags: None or int
        Number of Newey-West lags.  Defaults to None.
    nw_overlap: bool
        Whether there are overlaps in the NW lags.  Defaults to False.
    window_type: int
        FULL_SAMPLE, ROLLING, EXPANDING.  FULL_SAMPLE by default.
    window: int
        size of window (for rolling/expanding OLS)

    Panel OLS options:
        pool: bool
            Whether to run pooled panel regression.  Defaults to true.
        weights: DataFrame
            Weight for each observation.  The weights are not normalized;
            they're multiplied directly by each observation.
        entity_effects: bool
            Whether to account for entity fixed effects.  Defaults to false.
        time_effects: bool
            Whether to account for time fixed effects.  Defaults to false.
        x_effects: list
            List of x's to account for fixed effects.  Defaults to none.
        dropped_dummies: dict
            Key is the name of the variable for the fixed effect.
            Value is the value of that variable for which we drop the dummy.

            For entity fixed effects, key equals 'entity'.

            By default, the first dummy is dropped if no dummy is specified.
        cluster: {'time', 'entity'}
            cluster variances

    Returns
    -------
    The appropriate OLS object, which allows you to obtain betas and various
    statistics, such as std err, t-stat, etc.

    Examples
    --------
    # Run simple OLS.
    result = ols(y=y, x=x)

    # Run rolling simple OLS with window of size 10.
    result = ols(y=y, x=x, window_type=ROLLING, window=10)
    print result.beta

    result = ols(y=y, x=x, nw_lags=1)

    # Set up LHS and RHS for data across all items
    y = A
    x = {'B' : B, 'C' : C}

    # Run panel OLS.
    result = ols(y=y, x=x)

    # Run expanding panel OLS with window 10 and entity clustering.
    result = ols(y=y, x=x, cluster=ENTITY, window_type=EXPANDING, window=10)
    """
    try:
        import scipy as _
    except ImportError:
        raise Exception('Must install SciPy to use OLS functionality')

    # 'pool' is consumed here; the remaining kwargs are forwarded verbatim
    # to the selected OLS class. pop() replaces the get/test/del sequence.
    pool = kwargs.pop('pool', None)

    window_type = kwargs.get('window_type', common.FULL_SAMPLE)
    window_type = common._get_window_type(window_type)

    y = kwargs.get('y')

    if window_type == common.FULL_SAMPLE:
        # Rolling/expanding options are meaningless for a full-sample fit.
        for rolling_field in ('window_type', 'window', 'min_periods'):
            kwargs.pop(rolling_field, None)

        if isinstance(y, Series):
            klass = OLS
        elif pool == False:
            # '== False' (not 'is False') is kept deliberately so that a
            # falsy 0 also selects the non-pooled variant, as before.
            klass = NonPooledPanelOLS
        else:
            klass = PanelOLS
    else:
        if isinstance(y, Series):
            klass = MovingOLS
        elif pool == False:
            klass = NonPooledPanelOLS
        else:
            klass = MovingPanelOLS

    return klass(**kwargs)
| bsd-3-clause |
dpyro/servo | tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/_stream_hybi.py | 628 | 31933 | # Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file provides classes and helper functions for parsing/building frames
of the WebSocket protocol (RFC 6455).
Specification:
http://tools.ietf.org/html/rfc6455
"""
from collections import deque
import logging
import os
import struct
import time
from mod_pywebsocket import common
from mod_pywebsocket import util
from mod_pywebsocket._stream_base import BadOperationException
from mod_pywebsocket._stream_base import ConnectionTerminatedException
from mod_pywebsocket._stream_base import InvalidFrameException
from mod_pywebsocket._stream_base import InvalidUTF8Exception
from mod_pywebsocket._stream_base import StreamBase
from mod_pywebsocket._stream_base import UnsupportedFrameException
# Shared no-op masker, used when a received frame is not masked.
_NOOP_MASKER = util.NoopMasker()
class Frame(object):
    """Plain container for one WebSocket frame's wire-level fields
    (FIN flag, three reserved bits, opcode and payload)."""

    def __init__(self, fin=1, rsv1=0, rsv2=0, rsv3=0,
                 opcode=None, payload=''):
        (self.fin, self.rsv1, self.rsv2, self.rsv3) = (fin, rsv1, rsv2, rsv3)
        self.opcode = opcode
        self.payload = payload
# Helper functions made public to be used for writing unittests for WebSocket
# clients.
def create_length_header(length, mask):
    """Creates a length header.

    Args:
        length: Frame length. Must be less than 2^63.
        mask: Mask bit. Must be boolean.

    Raises:
        ValueError: when bad data is given.
    """
    mask_bit = (1 << 7) if mask else 0

    # Guard clauses, smallest encoding first (RFC 6455 section 5.2).
    if length < 0:
        raise ValueError('length must be non negative integer')
    if length <= 125:
        return chr(mask_bit | length)
    if length < (1 << 16):
        return chr(mask_bit | 126) + struct.pack('!H', length)
    if length < (1 << 63):
        return chr(mask_bit | 127) + struct.pack('!Q', length)
    raise ValueError('Payload is too big for one frame')
def create_header(opcode, payload_length, fin, rsv1, rsv2, rsv3, mask):
    """Creates a frame header.

    Raises:
        ValueError: when bad data is given.
    """
    if not 0 <= opcode <= 0xf:
        raise ValueError('Opcode out of range')
    if payload_length < 0 or (1 << 63) <= payload_length:
        raise ValueError('payload_length out of range')
    if (fin | rsv1 | rsv2 | rsv3) & ~1:
        raise ValueError('FIN bit and Reserved bit parameter must be 0 or 1')

    # First octet: FIN, RSV1-3, then the 4-bit opcode.
    first_byte = ((fin << 7)
                  | (rsv1 << 6) | (rsv2 << 5) | (rsv3 << 4)
                  | opcode)
    return chr(first_byte) + create_length_header(payload_length, mask)
def _build_frame(header, body, mask):
if not mask:
return header + body
masking_nonce = os.urandom(4)
masker = util.RepeatedXorMasker(masking_nonce)
return header + masking_nonce + masker.mask(body)
def _filter_and_format_frame_object(frame, mask, frame_filters):
    """Run every filter over `frame`, then serialize it to wire format."""
    for frame_filter in frame_filters:
        frame_filter.filter(frame)

    header = create_header(frame.opcode, len(frame.payload), frame.fin,
                           frame.rsv1, frame.rsv2, frame.rsv3, mask)
    return _build_frame(header, frame.payload, mask)
def create_binary_frame(
    message, opcode=common.OPCODE_BINARY, fin=1, mask=False,
    frame_filters=None):
    """Creates a simple binary frame with no extension, reserved bit.

    `frame_filters` defaults to no filters; the mutable default list was
    replaced with a None sentinel to avoid the shared-mutable-default
    pitfall (backward compatible: None behaves like the old []).
    """
    if frame_filters is None:
        frame_filters = []
    frame = Frame(fin=fin, opcode=opcode, payload=message)
    return _filter_and_format_frame_object(frame, mask, frame_filters)
def create_text_frame(
    message, opcode=common.OPCODE_TEXT, fin=1, mask=False,
    frame_filters=None):
    """Creates a simple text frame with no extension, reserved bit.

    `frame_filters` defaults to no filters; the mutable default list was
    replaced with a None sentinel to avoid the shared-mutable-default
    pitfall (backward compatible: None behaves like the old []).
    """
    if frame_filters is None:
        frame_filters = []
    encoded_message = message.encode('utf-8')
    return create_binary_frame(encoded_message, opcode, fin, mask,
                               frame_filters)
def parse_frame(receive_bytes, logger=None,
                ws_version=common.VERSION_HYBI_LATEST,
                unmask_receive=True):
    """Parses a frame. Returns a tuple containing each header field and
    payload.

    Args:
        receive_bytes: a function that reads frame data from a stream or
            something similar. The function takes length of the bytes to be
            read. The function must raise ConnectionTerminatedException if
            there is not enough data to be read.
        logger: a logging object.
        ws_version: the version of WebSocket protocol.
        unmask_receive: unmask received frames. When received unmasked
            frame, raises InvalidFrameException.

    Returns:
        A tuple (opcode, unmasked_payload, fin, rsv1, rsv2, rsv3).

    Raises:
        ConnectionTerminatedException: when receive_bytes raises it.
        InvalidFrameException: when the frame contains invalid data.
    """
    if not logger:
        logger = logging.getLogger()

    logger.log(common.LOGLEVEL_FINE, 'Receive the first 2 octets of a frame')

    received = receive_bytes(2)

    # First octet: FIN flag, three reserved bits, 4-bit opcode.
    first_byte = ord(received[0])
    fin = (first_byte >> 7) & 1
    rsv1 = (first_byte >> 6) & 1
    rsv2 = (first_byte >> 5) & 1
    rsv3 = (first_byte >> 4) & 1
    opcode = first_byte & 0xf

    # Second octet: mask flag and 7-bit payload length, where 126/127
    # signal a 2-/8-octet extended length field to follow.
    second_byte = ord(received[1])
    mask = (second_byte >> 7) & 1
    payload_length = second_byte & 0x7f

    logger.log(common.LOGLEVEL_FINE,
               'FIN=%s, RSV1=%s, RSV2=%s, RSV3=%s, opcode=%s, '
               'Mask=%s, Payload_length=%s',
               fin, rsv1, rsv2, rsv3, opcode, mask, payload_length)

    # NOTE(review): the message text contains a typo ("did'nt") -- it is a
    # runtime string, so it is intentionally left untouched here.
    if (mask == 1) != unmask_receive:
        raise InvalidFrameException(
            'Mask bit on the received frame did\'nt match masking '
            'configuration for received frames')

    # The HyBi and later specs disallow putting a value in 0x0-0xFFFF
    # into the 8-octet extended payload length field (or 0x0-0xFD in
    # 2-octet field).
    valid_length_encoding = True
    length_encoding_bytes = 1
    if payload_length == 127:
        logger.log(common.LOGLEVEL_FINE,
                   'Receive 8-octet extended payload length')

        extended_payload_length = receive_bytes(8)
        payload_length = struct.unpack(
            '!Q', extended_payload_length)[0]
        if payload_length > 0x7FFFFFFFFFFFFFFF:
            raise InvalidFrameException(
                'Extended payload length >= 2^63')
        if ws_version >= 13 and payload_length < 0x10000:
            # Non-minimal encoding: tolerated but warned about below.
            valid_length_encoding = False
            length_encoding_bytes = 8

        logger.log(common.LOGLEVEL_FINE,
                   'Decoded_payload_length=%s', payload_length)
    elif payload_length == 126:
        logger.log(common.LOGLEVEL_FINE,
                   'Receive 2-octet extended payload length')

        extended_payload_length = receive_bytes(2)
        payload_length = struct.unpack(
            '!H', extended_payload_length)[0]
        if ws_version >= 13 and payload_length < 126:
            valid_length_encoding = False
            length_encoding_bytes = 2

        logger.log(common.LOGLEVEL_FINE,
                   'Decoded_payload_length=%s', payload_length)

    if not valid_length_encoding:
        logger.warning(
            'Payload length is not encoded using the minimal number of '
            'bytes (%d is encoded using %d bytes)',
            payload_length,
            length_encoding_bytes)

    # Pick the unmasker: a real XOR masker when the frame is masked,
    # otherwise the shared no-op masker.
    if mask == 1:
        logger.log(common.LOGLEVEL_FINE, 'Receive mask')

        masking_nonce = receive_bytes(4)
        masker = util.RepeatedXorMasker(masking_nonce)

        logger.log(common.LOGLEVEL_FINE, 'Mask=%r', masking_nonce)
    else:
        masker = _NOOP_MASKER

    logger.log(common.LOGLEVEL_FINE, 'Receive payload data')
    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        receive_start = time.time()

    raw_payload_bytes = receive_bytes(payload_length)

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        logger.log(
            common.LOGLEVEL_FINE,
            'Done receiving payload data at %s MB/s',
            payload_length / (time.time() - receive_start) / 1000 / 1000)
    logger.log(common.LOGLEVEL_FINE, 'Unmask payload data')

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        unmask_start = time.time()

    unmasked_bytes = masker.mask(raw_payload_bytes)

    if logger.isEnabledFor(common.LOGLEVEL_FINE):
        logger.log(
            common.LOGLEVEL_FINE,
            'Done unmasking payload data at %s MB/s',
            payload_length / (time.time() - unmask_start) / 1000 / 1000)

    return opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3
class FragmentedFrameBuilder(object):
    """A stateful class to send a message as fragments."""

    def __init__(self, mask, frame_filters=[], encode_utf8=True):
        """Constructs an instance."""
        # NOTE(review): mutable default argument; appears safe because the
        # list is only iterated, never mutated -- confirm before refactoring.
        self._mask = mask
        self._frame_filters = frame_filters
        # This is for skipping UTF-8 encoding when building text type frames
        # from compressed data.
        self._encode_utf8 = encode_utf8

        # True while a fragmented message is in progress (a non-final
        # frame has been emitted and the closing frame has not).
        self._started = False

        # Hold opcode of the first frame in messages to verify types of other
        # frames in the message are all the same.
        self._opcode = common.OPCODE_TEXT

    def build(self, payload_data, end, binary):
        """Build one wire-format frame of a (possibly fragmented) message.

        Args:
            payload_data: payload for this fragment.
            end: True when this is the final fragment of the message.
            binary: True for a binary message, False for text.

        Raises:
            ValueError: when the frame type changes mid-message.
        """
        if binary:
            frame_type = common.OPCODE_BINARY
        else:
            frame_type = common.OPCODE_TEXT
        if self._started:
            if self._opcode != frame_type:
                raise ValueError('Message types are different in frames for '
                                 'the same message')
            # Continuation frames carry opcode 0.
            opcode = common.OPCODE_CONTINUATION
        else:
            opcode = frame_type
            self._opcode = frame_type

        if end:
            self._started = False
            fin = 1
        else:
            self._started = True
            fin = 0

        if binary or not self._encode_utf8:
            return create_binary_frame(
                payload_data, opcode, fin, self._mask, self._frame_filters)
        else:
            return create_text_frame(
                payload_data, opcode, fin, self._mask, self._frame_filters)
def _create_control_frame(opcode, body, mask, frame_filters):
    """Builds a serialized control frame (close/ping/pong) carrying body.

    Raises:
        BadOperationException: if, after filtering, the payload exceeds the
            125-byte limit RFC 6455 imposes on control frames.
    """
    frame = Frame(opcode=opcode, payload=body)
    for applied_filter in frame_filters:
        applied_filter.filter(frame)
    if len(frame.payload) > 125:
        raise BadOperationException(
            'Payload data size of control frames must be 125 bytes or less')
    header = create_header(
        frame.opcode, len(frame.payload), frame.fin,
        frame.rsv1, frame.rsv2, frame.rsv3, mask)
    return _build_frame(header, frame.payload, mask)
def create_ping_frame(body, mask=False, frame_filters=None):
    """Creates a serialized ping frame carrying body (at most 125 bytes)."""
    # Default to a per-call fresh list instead of a shared mutable default.
    return _create_control_frame(
        common.OPCODE_PING, body, mask,
        [] if frame_filters is None else frame_filters)
def create_pong_frame(body, mask=False, frame_filters=None):
    """Creates a serialized pong frame carrying body (at most 125 bytes)."""
    # Default to a per-call fresh list instead of a shared mutable default.
    return _create_control_frame(
        common.OPCODE_PONG, body, mask,
        [] if frame_filters is None else frame_filters)
def create_close_frame(body, mask=False, frame_filters=None):
    """Creates a serialized close frame carrying body (at most 125 bytes)."""
    # Default to a per-call fresh list instead of a shared mutable default.
    return _create_control_frame(
        common.OPCODE_CLOSE, body, mask,
        [] if frame_filters is None else frame_filters)
def create_closing_handshake_body(code, reason):
    """Serializes (code, reason) into a close frame body.

    The body is empty when code is None; otherwise it is a 2-octet status
    code in network byte order followed by the UTF-8 encoded reason.

    Raises:
        BadOperationException: if code is outside the valid range or is one
            of the reserved pseudo codes that must never appear on the wire.
    """
    if code is None:
        return ''
    if (code < common.STATUS_NORMAL_CLOSURE or
        code > common.STATUS_USER_PRIVATE_MAX):
        raise BadOperationException('Status code is out of range')
    if code in (common.STATUS_NO_STATUS_RECEIVED,
                common.STATUS_ABNORMAL_CLOSURE,
                common.STATUS_TLS_HANDSHAKE):
        raise BadOperationException('Status code is reserved pseudo '
                                    'code')
    return struct.pack('!H', code) + reason.encode('utf-8')
class StreamOptions(object):
    """A bag of option values used to configure Stream objects."""

    def __init__(self):
        """Initializes every option to its default value."""
        # Per-frame filters; these see control frames too.
        self.outgoing_frame_filters = []
        self.incoming_frame_filters = []
        # Per-message filters; control frames bypass these.
        self.outgoing_message_filters = []
        self.incoming_message_filters = []
        # Whether outgoing text messages are UTF-8 encoded before framing.
        self.encode_text_message_to_utf8 = True
        # Server-side defaults: a server never masks what it sends but
        # does unmask what it receives.
        self.mask_send = False
        self.unmask_receive = True
class Stream(StreamBase):
    """A class for parsing/building frames of the WebSocket protocol
    (RFC 6455).
    """
    def __init__(self, request, options):
        """Constructs an instance.
        Args:
            request: mod_python request.
            options: StreamOptions instance controlling masking and the
                frame/message filters applied by this stream.
        """
        StreamBase.__init__(self, request)
        self._logger = util.get_class_logger(self)
        self._options = options
        # Closing-handshake state, tracked per direction on the request.
        self._request.client_terminated = False
        self._request.server_terminated = False
        # Holds body of received fragments.
        self._received_fragments = []
        # Holds the opcode of the first fragment.
        self._original_opcode = None
        self._writer = FragmentedFrameBuilder(
            self._options.mask_send, self._options.outgoing_frame_filters,
            self._options.encode_text_message_to_utf8)
        # FIFO of ping bodies sent but not yet matched by a pong.
        self._ping_queue = deque()
    def _receive_frame(self):
        """Receives a frame and return data in the frame as a tuple containing
        each header field and payload separately.
        Raises:
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid data.
        """
        def _receive_bytes(length):
            return self.receive_bytes(length)
        return parse_frame(receive_bytes=_receive_bytes,
                           logger=self._logger,
                           ws_version=self._request.ws_version,
                           unmask_receive=self._options.unmask_receive)
    def _receive_frame_as_frame_object(self):
        """Receives one frame and wraps its fields in a Frame object."""
        opcode, unmasked_bytes, fin, rsv1, rsv2, rsv3 = self._receive_frame()
        return Frame(fin=fin, rsv1=rsv1, rsv2=rsv2, rsv3=rsv3,
                     opcode=opcode, payload=unmasked_bytes)
    def receive_filtered_frame(self):
        """Receives a frame and applies frame filters and message filters.
        The frame to be received must satisfy following conditions:
        - The frame is not fragmented.
        - The opcode of the frame is TEXT or BINARY.
        DO NOT USE this method except for testing purpose.
        """
        frame = self._receive_frame_as_frame_object()
        if not frame.fin:
            raise InvalidFrameException(
                'Segmented frames must not be received via '
                'receive_filtered_frame()')
        if (frame.opcode != common.OPCODE_TEXT and
            frame.opcode != common.OPCODE_BINARY):
            raise InvalidFrameException(
                'Control frames must not be received via '
                'receive_filtered_frame()')
        for frame_filter in self._options.incoming_frame_filters:
            frame_filter.filter(frame)
        for message_filter in self._options.incoming_message_filters:
            frame.payload = message_filter.filter(frame.payload)
        return frame
    def send_message(self, message, end=True, binary=False):
        """Send message.
        Args:
            message: text in unicode or binary in str to send.
            end: True iff this call completes the message; False keeps the
                message open for further fragments.
            binary: send message as binary frame.
        Raises:
            BadOperationException: when called on a server-terminated
                connection or called with inconsistent message type or
                binary parameter.
        """
        if self._request.server_terminated:
            raise BadOperationException(
                'Requested send_message after sending out a closing handshake')
        if binary and isinstance(message, unicode):
            raise BadOperationException(
                'Message for binary frame must be instance of str')
        for message_filter in self._options.outgoing_message_filters:
            message = message_filter.filter(message, end, binary)
        try:
            # Set this to any positive integer to limit maximum size of data in
            # payload data of each frame.
            MAX_PAYLOAD_DATA_SIZE = -1
            if MAX_PAYLOAD_DATA_SIZE <= 0:
                # Unlimited frame size: send the whole message in one frame.
                self._write(self._writer.build(message, end, binary))
                return
            bytes_written = 0
            while True:
                end_for_this_frame = end
                bytes_to_write = len(message) - bytes_written
                if (MAX_PAYLOAD_DATA_SIZE > 0 and
                    bytes_to_write > MAX_PAYLOAD_DATA_SIZE):
                    end_for_this_frame = False
                    bytes_to_write = MAX_PAYLOAD_DATA_SIZE
                frame = self._writer.build(
                    message[bytes_written:bytes_written + bytes_to_write],
                    end_for_this_frame,
                    binary)
                self._write(frame)
                bytes_written += bytes_to_write
                # This if must be placed here (the end of while block) so that
                # at least one frame is sent.
                if len(message) <= bytes_written:
                    break
        except ValueError, e:
            raise BadOperationException(e)
    def _get_message_from_frame(self, frame):
        """Gets a message from frame. If the message is composed of fragmented
        frames and the frame is not the last fragmented frame, this method
        returns None. The whole message will be returned when the last
        fragmented frame is passed to this method.
        Raises:
            InvalidFrameException: when the frame doesn't match defragmentation
                context, or the frame contains invalid data.
        """
        if frame.opcode == common.OPCODE_CONTINUATION:
            if not self._received_fragments:
                if frame.fin:
                    raise InvalidFrameException(
                        'Received a termination frame but fragmentation '
                        'not started')
                else:
                    raise InvalidFrameException(
                        'Received an intermediate frame but '
                        'fragmentation not started')
            if frame.fin:
                # End of fragmentation frame
                self._received_fragments.append(frame.payload)
                message = ''.join(self._received_fragments)
                self._received_fragments = []
                return message
            else:
                # Intermediate frame
                self._received_fragments.append(frame.payload)
                return None
        else:
            if self._received_fragments:
                if frame.fin:
                    raise InvalidFrameException(
                        'Received an unfragmented frame without '
                        'terminating existing fragmentation')
                else:
                    raise InvalidFrameException(
                        'New fragmentation started without terminating '
                        'existing fragmentation')
            if frame.fin:
                # Unfragmented frame
                self._original_opcode = frame.opcode
                return frame.payload
            else:
                # Start of fragmentation frame
                if common.is_control_opcode(frame.opcode):
                    raise InvalidFrameException(
                        'Control frames must not be fragmented')
                self._original_opcode = frame.opcode
                self._received_fragments.append(frame.payload)
                return None
    def _process_close_message(self, message):
        """Processes close message.
        Args:
            message: close message.
        Raises:
            InvalidFrameException: when the message is invalid.
        """
        self._request.client_terminated = True
        # Status code is optional. We can have status reason only if we
        # have status code. Status reason can be empty string. So,
        # allowed cases are
        # - no application data: no code no reason
        # - 2 octet of application data: has code but no reason
        # - 3 or more octet of application data: both code and reason
        if len(message) == 0:
            self._logger.debug('Received close frame (empty body)')
            self._request.ws_close_code = (
                common.STATUS_NO_STATUS_RECEIVED)
        elif len(message) == 1:
            raise InvalidFrameException(
                'If a close frame has status code, the length of '
                'status code must be 2 octet')
        elif len(message) >= 2:
            self._request.ws_close_code = struct.unpack(
                '!H', message[0:2])[0]
            self._request.ws_close_reason = message[2:].decode(
                'utf-8', 'replace')
            self._logger.debug(
                'Received close frame (code=%d, reason=%r)',
                self._request.ws_close_code,
                self._request.ws_close_reason)
        # As we've received a close frame, no more data is coming over the
        # socket. We can now safely close the socket without worrying about
        # RST sending.
        if self._request.server_terminated:
            self._logger.debug(
                'Received ack for server-initiated closing handshake')
            return
        self._logger.debug(
            'Received client-initiated closing handshake')
        code = common.STATUS_NORMAL_CLOSURE
        reason = ''
        if hasattr(self._request, '_dispatcher'):
            # Let the application handler choose the code/reason for our
            # acknowledging close frame.
            dispatcher = self._request._dispatcher
            code, reason = dispatcher.passive_closing_handshake(
                self._request)
            if code is None and reason is not None and len(reason) > 0:
                self._logger.warning(
                    'Handler specified reason despite code being None')
                reason = ''
            if reason is None:
                reason = ''
        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Acknowledged closing handshake initiated by the peer '
            '(code=%r, reason=%r)', code, reason)
    def _process_ping_message(self, message):
        """Processes ping message.
        Args:
            message: ping message.
        """
        # An application-installed ping handler takes over; the default
        # behavior is to reply with a pong echoing the ping body.
        try:
            handler = self._request.on_ping_handler
            if handler:
                handler(self._request, message)
                return
        except AttributeError, e:
            pass
        self._send_pong(message)
    def _process_pong_message(self, message):
        """Processes pong message.
        Args:
            message: pong message.
        """
        # TODO(tyoshino): Add ping timeout handling.
        inflight_pings = deque()
        while True:
            try:
                expected_body = self._ping_queue.popleft()
                if expected_body == message:
                    # inflight_pings contains pings ignored by the
                    # other peer. Just forget them.
                    self._logger.debug(
                        'Ping %r is acked (%d pings were ignored)',
                        expected_body, len(inflight_pings))
                    break
                else:
                    inflight_pings.append(expected_body)
            except IndexError, e:
                # The received pong was unsolicited pong. Keep the
                # ping queue as is.
                self._ping_queue = inflight_pings
                self._logger.debug('Received a unsolicited pong')
                break
        try:
            handler = self._request.on_pong_handler
            if handler:
                handler(self._request, message)
        except AttributeError, e:
            pass
    def receive_message(self):
        """Receive a WebSocket frame and return its payload as a text in
        unicode or a binary in str.
        Returns:
            payload data of the frame
            - as unicode instance if received text frame
            - as str instance if received binary frame
            or None iff received closing handshake.
        Raises:
            BadOperationException: when called on a client-terminated
                connection.
            ConnectionTerminatedException: when read returns empty
                string.
            InvalidFrameException: when the frame contains invalid
                data.
            UnsupportedFrameException: when the received frame has
                flags, opcode we cannot handle. You can ignore this
                exception and continue receiving the next frame.
        """
        if self._request.client_terminated:
            raise BadOperationException(
                'Requested receive_message after receiving a closing '
                'handshake')
        while True:
            # mp_conn.read will block if no bytes are available.
            # Timeout is controlled by TimeOut directive of Apache.
            frame = self._receive_frame_as_frame_object()
            # Check the constraint on the payload size for control frames
            # before extension processes the frame.
            # See also http://tools.ietf.org/html/rfc6455#section-5.5
            if (common.is_control_opcode(frame.opcode) and
                len(frame.payload) > 125):
                raise InvalidFrameException(
                    'Payload data size of control frames must be 125 bytes or '
                    'less')
            for frame_filter in self._options.incoming_frame_filters:
                frame_filter.filter(frame)
            if frame.rsv1 or frame.rsv2 or frame.rsv3:
                raise UnsupportedFrameException(
                    'Unsupported flag is set (rsv = %d%d%d)' %
                    (frame.rsv1, frame.rsv2, frame.rsv3))
            # None means the frame was an intermediate fragment; keep
            # reading until the message is complete.
            message = self._get_message_from_frame(frame)
            if message is None:
                continue
            for message_filter in self._options.incoming_message_filters:
                message = message_filter.filter(message)
            if self._original_opcode == common.OPCODE_TEXT:
                # The WebSocket protocol section 4.4 specifies that invalid
                # characters must be replaced with U+fffd REPLACEMENT
                # CHARACTER.
                try:
                    return message.decode('utf-8')
                except UnicodeDecodeError, e:
                    raise InvalidUTF8Exception(e)
            elif self._original_opcode == common.OPCODE_BINARY:
                return message
            elif self._original_opcode == common.OPCODE_CLOSE:
                self._process_close_message(message)
                return None
            elif self._original_opcode == common.OPCODE_PING:
                self._process_ping_message(message)
            elif self._original_opcode == common.OPCODE_PONG:
                self._process_pong_message(message)
            else:
                raise UnsupportedFrameException(
                    'Opcode %d is not supported' % self._original_opcode)
    def _send_closing_handshake(self, code, reason):
        """Builds and writes a close frame; marks this side terminated."""
        body = create_closing_handshake_body(code, reason)
        frame = create_close_frame(
            body, mask=self._options.mask_send,
            frame_filters=self._options.outgoing_frame_filters)
        # The flag is set before writing so no further send can sneak in.
        self._request.server_terminated = True
        self._write(frame)
    def close_connection(self, code=common.STATUS_NORMAL_CLOSURE, reason='',
                         wait_response=True):
        """Closes a WebSocket connection.
        Args:
            code: Status code for close frame. If code is None, a close
                frame with empty body will be sent.
            reason: string representing close reason.
            wait_response: True when caller want to wait the response.
        Raises:
            BadOperationException: when reason is specified with code None
                or reason is not an instance of both str and unicode.
        """
        if self._request.server_terminated:
            self._logger.debug(
                'Requested close_connection but server is already terminated')
            return
        if code is None:
            if reason is not None and len(reason) > 0:
                raise BadOperationException(
                    'close reason must not be specified if code is None')
            reason = ''
        else:
            if not isinstance(reason, str) and not isinstance(reason, unicode):
                raise BadOperationException(
                    'close reason must be an instance of str or unicode')
        self._send_closing_handshake(code, reason)
        self._logger.debug(
            'Initiated closing handshake (code=%r, reason=%r)',
            code, reason)
        if (code == common.STATUS_GOING_AWAY or
            code == common.STATUS_PROTOCOL_ERROR) or not wait_response:
            # It doesn't make sense to wait for a close frame if the reason is
            # protocol error or that the server is going away. For some of
            # other reasons, it might not make sense to wait for a close frame,
            # but it's not clear, yet.
            return
        # TODO(ukai): 2. wait until the /client terminated/ flag has been set,
        # or until a server-defined timeout expires.
        #
        # For now, we expect receiving closing handshake right after sending
        # out closing handshake.
        message = self.receive_message()
        if message is not None:
            raise ConnectionTerminatedException(
                'Didn\'t receive valid ack for closing handshake')
        # TODO: 3. close the WebSocket connection.
        # note: mod_python Connection (mp_conn) doesn't have close method.
    def send_ping(self, body=''):
        """Sends a ping frame carrying body."""
        frame = create_ping_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)
        # Remember the body so _process_pong_message can match the ack.
        self._ping_queue.append(body)
    def _send_pong(self, body):
        """Sends a pong frame echoing body."""
        frame = create_pong_frame(
            body,
            self._options.mask_send,
            self._options.outgoing_frame_filters)
        self._write(frame)
    def get_last_received_opcode(self):
        """Returns the opcode of the WebSocket message which the last received
        frame belongs to. The return value is valid iff immediately after
        receive_message call.
        """
        return self._original_opcode
# vi:sts=4 sw=4 et
| mpl-2.0 |
insiderr/insiderr-app | ios-patches/basemodules/twisted/internet/test/test_tcp.py | 5 | 82780 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for implementations of L{IReactorTCP} and the TCP parts of
L{IReactorSocket}.
"""
from __future__ import division, absolute_import
__metaclass__ = type
import errno
import socket
from functools import wraps
from zope.interface import implementer
from zope.interface.verify import verifyClass
from twisted.python.runtime import platform
from twisted.python.failure import Failure
from twisted.python import log
from twisted.trial.unittest import SkipTest, TestCase
from twisted.internet.error import (
ConnectionLost, UserError, ConnectionRefusedError, ConnectionDone,
ConnectionAborted, DNSLookupError)
from twisted.internet.test.connectionmixins import (
LogObserverMixin, ConnectionTestsMixin, StreamClientTestsMixin,
findFreePort, ConnectableProtocol, EndpointCreator,
runProtocolsWithReactor, Stop, BrokenContextFactory)
from twisted.internet.test.reactormixins import (
ReactorBuilder, needsRunningReactor)
from twisted.internet.interfaces import (
ILoggingContext, IConnector, IReactorFDSet, IReactorSocket, IReactorTCP,
IResolverSimple, ITLSTransport)
from twisted.internet.address import IPv4Address, IPv6Address
from twisted.internet.defer import (
Deferred, DeferredList, maybeDeferred, gatherResults, succeed, fail)
from twisted.internet.endpoints import TCP4ServerEndpoint, TCP4ClientEndpoint
from twisted.internet.protocol import ServerFactory, ClientFactory, Protocol
from twisted.internet.interfaces import (
IPushProducer, IPullProducer, IHalfCloseableProtocol)
from twisted.internet.tcp import Connection, Server, _resolveIPv6
from twisted.internet.test.test_core import ObjectModelIntegrationMixin
from twisted.test.test_tcp import MyClientFactory, MyServerFactory
from twisted.test.test_tcp import ClosingFactory, ClientStartStopFactory
try:
from OpenSSL import SSL
except ImportError:
useSSL = False
else:
from twisted.internet.ssl import ClientContextFactory
useSSL = True
try:
socket.socket(socket.AF_INET6, socket.SOCK_STREAM).close()
except socket.error as e:
ipv6Skip = str(e)
else:
ipv6Skip = None
if platform.isWindows():
from twisted.internet.test import _win32ifaces
getLinkLocalIPv6Addresses = _win32ifaces.win32GetLinkLocalIPv6Addresses
else:
try:
from twisted.internet.test import _posixifaces
except ImportError:
getLinkLocalIPv6Addresses = lambda: []
else:
getLinkLocalIPv6Addresses = _posixifaces.posixGetLinkLocalIPv6Addresses
def getLinkLocalIPv6Address():
    """
    Find and return a configured link local IPv6 address including a scope
    identifier using the % separation syntax. If the system has no link local
    IPv6 addresses, raise L{SkipTest} instead.

    @raise SkipTest: if no link local address can be found or if the
        C{netifaces} module is not available.

    @return: a C{str} giving the address
    """
    # Returning from inside the loop picks the first address, if any.
    for address in getLinkLocalIPv6Addresses():
        return address
    raise SkipTest("Link local IPv6 address unavailable")
def connect(client, destination):
    """
    Connect a socket to the given destination.

    @param client: A C{socket.socket}.

    @param destination: A tuple of (host, port). The host is a C{str}, the
        port a C{int}. If the C{host} is an IPv6 IP, the address is resolved
        using C{getaddrinfo} and the first version found is used.
    """
    host, port = destination
    # IPv6 literals (and scoped addresses) need getaddrinfo resolution;
    # anything else can be passed through as a plain (host, port) pair.
    needsResolution = (':' in host) or ('%' in host)
    if needsResolution:
        address = socket.getaddrinfo(host, port)[0][4]
    else:
        address = (host, port)
    client.connect(address)
class FakeSocket(object):
    """
    A fake for L{socket.socket} objects.

    @ivar data: A C{str} giving the data which will be returned from
        L{FakeSocket.recv}.

    @ivar sendBuffer: A C{list} of the objects passed to L{FakeSocket.send}.
    """
    def __init__(self, data):
        self.data = data
        self.sendBuffer = []

    def setblocking(self, blocking):
        # Just remember the flag; there is no real socket to configure.
        self.blocking = blocking

    def recv(self, size):
        # The size hint is ignored; the canned data is returned whole.
        return self.data

    def send(self, bytes):
        """
        I{Send} all of C{bytes} by accumulating it into C{self.sendBuffer}.

        @return: The length of C{bytes}, indicating all the data has been
            accepted.
        """
        self.sendBuffer.append(bytes)
        return len(bytes)

    def shutdown(self, how):
        """
        Shutdown is not implemented. The method is provided since real sockets
        have it and some code expects it. No behavior of L{FakeSocket} is
        affected by a call to it.
        """

    def close(self):
        """
        Close is not implemented. The method is provided since real sockets
        have it and some code expects it. No behavior of L{FakeSocket} is
        affected by a call to it.
        """

    def setsockopt(self, *args):
        """
        Setsockopt is not implemented. The method is provided since
        real sockets have it and some code expects it. No behavior of
        L{FakeSocket} is affected by a call to it.
        """

    def fileno(self):
        """
        Return a fake file descriptor. If actually used, this will have no
        connection to this L{FakeSocket} and will probably cause surprising
        results.
        """
        return 1
class TestFakeSocket(TestCase):
    """
    Test that the FakeSocket can be used by the doRead method of L{Connection}
    """
    def test_blocking(self):
        # setblocking only records the flag on the fake.
        fake = FakeSocket(b"someData")
        fake.setblocking(0)
        self.assertEqual(fake.blocking, 0)

    def test_recv(self):
        # recv ignores its size argument and returns the canned data.
        fake = FakeSocket(b"someData")
        self.assertEqual(fake.recv(10), b"someData")

    def test_send(self):
        """
        L{FakeSocket.send} accepts the entire string passed to it, adds it to
        its send buffer, and returns its length.
        """
        fake = FakeSocket(b"")
        byteCount = fake.send(b"foo")
        self.assertEqual(byteCount, 3)
        self.assertEqual(fake.sendBuffer, [b"foo"])
class FakeProtocol(Protocol):
    """
    An L{IProtocol} that returns a value from its dataReceived method.
    """
    def dataReceived(self, data):
        """
        Return something other than C{None} to trigger a deprecation warning for
        that behavior.
        """
        # Any non-None value works; an empty tuple is a cheap sentinel.
        return ()
@implementer(IReactorFDSet)
class _FakeFDSetReactor(object):
    """
    An in-memory implementation of L{IReactorFDSet}, which records the current
    sets of active L{IReadDescriptor} and L{IWriteDescriptor}s.

    @ivar _readers: The set of of L{IReadDescriptor}s active on this
        L{_FakeFDSetReactor}
    @type _readers: L{set}

    @ivar _writers: The set of of L{IWriteDescriptor}s active on this
        L{_FakeFDSetReactor}
    @ivar _writers: L{set}
    """
    def __init__(self):
        self._readers = set()
        self._writers = set()

    def addReader(self, reader):
        self._readers.add(reader)

    def removeReader(self, reader):
        # discard is a no-op when absent, matching the guarded remove.
        self._readers.discard(reader)

    def addWriter(self, writer):
        self._writers.add(writer)

    def removeWriter(self, writer):
        self._writers.discard(writer)

    def removeAll(self):
        # Snapshot everything, then reset to a pristine state.
        result = self.getReaders() + self.getWriters()
        self.__init__()
        return result

    def getReaders(self):
        return list(self._readers)

    def getWriters(self):
        return list(self._writers)

verifyClass(IReactorFDSet, _FakeFDSetReactor)
class TCPServerTests(TestCase):
    """
    Whitebox tests for L{twisted.internet.tcp.Server}.
    """
    def setUp(self):
        class FakePort(object):
            _realPortNumber = 3

        self.reactor = _FakeFDSetReactor()
        self.skt = FakeSocket(b"")
        self.protocol = Protocol()
        self.server = Server(
            self.skt, self.protocol, ("", 0), FakePort(), None, self.reactor)

    def _loseConnection(self):
        # Simulate the transport losing its connection so that subsequent
        # writes must be discarded.
        self.server.connectionLost(
            Failure(Exception("Simulated lost connection")))

    def test_writeAfterDisconnect(self):
        """
        L{Server.write} discards bytes passed to it if called after it has lost
        its connection.
        """
        self._loseConnection()
        self.server.write(b"hello world")
        self.assertEqual(self.skt.sendBuffer, [])

    def test_writeAfteDisconnectAfterTLS(self):
        """
        L{Server.write} discards bytes passed to it if called after it has lost
        its connection when the connection had started TLS.
        """
        self.server.TLS = True
        self.test_writeAfterDisconnect()

    def test_writeSequenceAfterDisconnect(self):
        """
        L{Server.writeSequence} discards bytes passed to it if called after it
        has lost its connection.
        """
        self._loseConnection()
        self.server.writeSequence([b"hello world"])
        self.assertEqual(self.skt.sendBuffer, [])

    def test_writeSequenceAfteDisconnectAfterTLS(self):
        """
        L{Server.writeSequence} discards bytes passed to it if called after it
        has lost its connection when the connection had started TLS.
        """
        self.server.TLS = True
        self.test_writeSequenceAfterDisconnect()
class TCPConnectionTests(TestCase):
    """
    Whitebox tests for L{twisted.internet.tcp.Connection}.
    """
    def test_doReadWarningIsRaised(self):
        """
        When an L{IProtocol} implementation that returns a value from its
        C{dataReceived} method, a deprecated warning is emitted.
        """
        connection = Connection(FakeSocket(b"someData"), FakeProtocol())
        connection.doRead()
        warnings = self.flushWarnings([FakeProtocol.dataReceived])
        self.assertEqual(warnings[0]['category'], DeprecationWarning)
        self.assertEqual(
            warnings[0]["message"],
            "Returning a value other than None from "
            "twisted.internet.test.test_tcp.FakeProtocol.dataReceived "
            "is deprecated since Twisted 11.0.0.")
        self.assertEqual(len(warnings), 1)

    def test_noTLSBeforeStartTLS(self):
        """
        The C{TLS} attribute of a L{Connection} instance is C{False} before
        L{Connection.startTLS} is called.
        """
        connection = Connection(FakeSocket(b""), FakeProtocol())
        self.assertFalse(connection.TLS)

    def test_tlsAfterStartTLS(self):
        """
        The C{TLS} attribute of a L{Connection} instance is C{True} after
        L{Connection.startTLS} is called.
        """
        connection = Connection(
            FakeSocket(b""), FakeProtocol(), reactor=_FakeFDSetReactor())
        connection._tlsClientDefault = True
        connection.startTLS(ClientContextFactory(), True)
        self.assertTrue(connection.TLS)
    if not useSSL:
        test_tlsAfterStartTLS.skip = "No SSL support available"
class TCPCreator(EndpointCreator):
    """
    Create IPv4 TCP endpoints for L{runProtocolsWithReactor}-based tests.
    """
    # Both server and client endpoints bind/connect on the loopback interface.
    interface = "127.0.0.1"
    def server(self, reactor):
        """
        Create a server-side TCP endpoint.

        @return: A L{TCP4ServerEndpoint} listening on an OS-allocated port.
        """
        return TCP4ServerEndpoint(reactor, 0, interface=self.interface)
    def client(self, reactor, serverAddress):
        """
        Create a client end point that will connect to the given address.
        @type serverAddress: L{IPv4Address}

        @return: A L{TCP4ClientEndpoint} targeting C{serverAddress.port}.
        """
        return TCP4ClientEndpoint(reactor, self.interface, serverAddress.port)
class TCP6Creator(TCPCreator):
    """
    Create IPv6 TCP endpoints for
    C{ReactorBuilder.runProtocolsWithReactor}-based tests.
    The endpoint types in question here are still the TCP4 variety, since
    these simply pass through IPv6 address literals to the reactor, and we are
    only testing address literals, not name resolution (as name resolution has
    not yet been implemented). See http://twistedmatrix.com/trac/ticket/4470
    for more specific information about new endpoint classes. The naming is
    slightly misleading, but presumably if you're passing an IPv6 literal, you
    know what you're asking for.
    """
    def __init__(self):
        # Resolved at instantiation time (raises SkipTest when the host has
        # no link-local IPv6 address), so importing this module stays safe.
        self.interface = getLinkLocalIPv6Address()
@implementer(IResolverSimple)
class FakeResolver(object):
    """
    A resolver implementation based on a C{dict} mapping names to addresses.
    """
    def __init__(self, names):
        self.names = names

    def getHostByName(self, name, timeout):
        """
        Return the address mapped to C{name} if it exists, or raise a
        C{DNSLookupError}.

        @param name: The name to resolve.

        @param timeout: The lookup timeout, ignore here.
        """
        if name in self.names:
            return succeed(self.names[name])
        return fail(DNSLookupError("FakeResolver couldn't find " + name))
class TCPClientTestsBase(ReactorBuilder, ConnectionTestsMixin,
StreamClientTestsMixin):
"""
Base class for builders defining tests related to
L{IReactorTCP.connectTCP}. Classes which uses this in must provide all of
the documented instance variables in order to specify how the test works.
These are documented as instance variables rather than declared as methods
due to some peculiar inheritance ordering concerns, but they are
effectively abstract methods.
@ivar endpoints: A L{twisted.internet.test.reactormixins.EndpointCreator}
instance.
@ivar interface: An IP address literal to locally bind a socket to as well
as to connect to. This can be any valid interface for the local host.
@type interface: C{str}
@ivar port: An unused local listening port to listen on and connect to.
This will be used in conjunction with the C{interface}. (Depending on
what they're testing, some tests will locate their own port with
L{findFreePort} instead.)
@type port: C{int}
@ivar family: an address family constant, such as L{socket.AF_INET},
L{socket.AF_INET6}, or L{socket.AF_UNIX}, which indicates the address
family of the transport type under test.
@type family: C{int}
@ivar addressClass: the L{twisted.internet.interfaces.IAddress} implementor
associated with the transport type under test. Must also be a
3-argument callable which produces an instance of same.
@type addressClass: C{type}
@ivar fakeDomainName: A fake domain name to use, to simulate hostname
resolution and to distinguish between hostnames and IP addresses where
necessary.
@type fakeDomainName: C{str}
"""
requiredInterfaces = (IReactorTCP,)
_port = None
@property
def port(self):
"""
Return the port number to connect to, using C{self._port} set up by
C{listen} if available.
@return: The port number to connect to.
@rtype: C{int}
"""
if self._port is not None:
return self._port.getHost().port
return findFreePort(self.interface, self.family)[1]
@property
def interface(self):
"""
Return the interface attribute from the endpoints object.
"""
return self.endpoints.interface
def listen(self, reactor, factory):
"""
Start a TCP server with the given C{factory}.
@param reactor: The reactor to create the TCP port in.
@param factory: The server factory.
@return: A TCP port instance.
"""
self._port = reactor.listenTCP(0, factory, interface=self.interface)
return self._port
def connect(self, reactor, factory):
"""
Start a TCP client with the given C{factory}.
@param reactor: The reactor to create the connection in.
@param factory: The client factory.
@return: A TCP connector instance.
"""
return reactor.connectTCP(self.interface, self.port, factory)
def test_addresses(self):
"""
A client's transport's C{getHost} and C{getPeer} return L{IPv4Address}
instances which have the dotted-quad string form of the resolved
adddress of the local and remote endpoints of the connection
respectively as their C{host} attribute, not the hostname originally
passed in to
L{connectTCP<twisted.internet.interfaces.IReactorTCP.connectTCP>}, if a
hostname was used.
"""
host, port = findFreePort(self.interface, self.family)[:2]
reactor = self.buildReactor()
fakeDomain = self.fakeDomainName
reactor.installResolver(FakeResolver({fakeDomain: self.interface}))
server = reactor.listenTCP(
0, ServerFactory.forProtocol(Protocol), interface=host)
serverAddress = server.getHost()
transportData = {'host': None, 'peer': None, 'instance': None}
class CheckAddress(Protocol):
def makeConnection(self, transport):
transportData['host'] = transport.getHost()
transportData['peer'] = transport.getPeer()
transportData['instance'] = transport
reactor.stop()
clientFactory = Stop(reactor)
clientFactory.protocol = CheckAddress
def connectMe():
reactor.connectTCP(
fakeDomain, server.getHost().port, clientFactory,
bindAddress=(self.interface, port))
needsRunningReactor(reactor, connectMe)
self.runReactor(reactor)
if clientFactory.failReason:
self.fail(clientFactory.failReason.getTraceback())
transportRepr = "<%s to %s at %x>" % (
transportData['instance'].__class__,
transportData['instance'].addr,
id(transportData['instance']))
self.assertEqual(
transportData['host'],
self.addressClass('TCP', self.interface, port))
self.assertEqual(
transportData['peer'],
self.addressClass('TCP', self.interface, serverAddress.port))
self.assertEqual(
repr(transportData['instance']), transportRepr)
    def test_badContext(self):
        """
        If the context factory passed to L{ITCPTransport.startTLS} raises an
        exception from its C{getContext} method, that exception is raised by
        L{ITCPTransport.startTLS}.
        """
        reactor = self.buildReactor()
        brokenFactory = BrokenContextFactory()
        # Collects exactly one of: the assertRaises result, a connection
        # failure, or the "skip" marker for non-TLS transports.
        results = []
        serverFactory = ServerFactory.forProtocol(Protocol)
        port = reactor.listenTCP(0, serverFactory, interface=self.interface)
        endpoint = self.endpoints.client(reactor, port.getHost())
        clientFactory = ClientFactory()
        clientFactory.protocol = Protocol
        connectDeferred = endpoint.connect(clientFactory)
        def connected(protocol):
            if not ITLSTransport.providedBy(protocol.transport):
                results.append("skip")
            else:
                results.append(self.assertRaises(ValueError,
                                                 protocol.transport.startTLS,
                                                 brokenFactory))
        def connectFailed(failure):
            results.append(failure)
        def whenRun():
            connectDeferred.addCallback(connected)
            connectDeferred.addErrback(connectFailed)
            connectDeferred.addBoth(lambda ign: reactor.stop())
        needsRunningReactor(reactor, whenRun)
        self.runReactor(reactor)
        self.assertEqual(len(results), 1,
                         "more than one callback result: %s" % (results,))
        if isinstance(results[0], Failure):
            # self.fail(Failure)
            results[0].raiseException()
        if results[0] == "skip":
            raise SkipTest("Reactor does not support ITLSTransport")
        self.assertEqual(BrokenContextFactory.message, str(results[0]))
class TCP4ClientTestsBuilder(TCPClientTestsBase):
    """
    Builder configured with IPv4 parameters for tests related to
    L{IReactorTCP.connectTCP}.
    """
    family = socket.AF_INET
    addressClass = IPv4Address
    endpoints = TCPCreator()
    # A hostname (resolved via the test's installed resolver) rather than a
    # dotted quad, so the client tests exercise name resolution too.
    fakeDomainName = 'some-fake.domain.example.com'
class TCP6ClientTestsBuilder(TCPClientTestsBase):
    """
    Builder configured with IPv6 parameters for tests related to
    L{IReactorTCP.connectTCP}.
    """
    if ipv6Skip:
        skip = ipv6Skip
    family = socket.AF_INET6
    addressClass = IPv6Address
    def setUp(self):
        """
        Create the IPv6 endpoint creator and choose the name the client tests
        will "resolve".
        """
        # Deferred to setUp so the creator is never constructed when the
        # whole suite is skipped.
        self.endpoints = TCP6Creator()
        # test_addresses distinguishes the resolved name from the name on the
        # socket itself.  All the same invariants should hold, but handing an
        # IPv6 address back from a resolver is not something the reactor can
        # do, so the connect call in the IPv6 variant of that test simply
        # uses an address literal.
        self.fakeDomainName = self.endpoints.interface
class TCPConnectorTestsBuilder(ReactorBuilder):
    """
    Tests for the L{IConnector} provider returned by L{IReactorTCP.connectTCP}.
    """
    requiredInterfaces = (IReactorTCP,)
    def test_connectorIdentity(self):
        """
        L{IReactorTCP.connectTCP} returns an object which provides
        L{IConnector}.  The destination of the connector is the address which
        was passed to C{connectTCP}.  The same connector object is passed to
        the factory's C{startedConnecting} method as to the factory's
        C{clientConnectionLost} method.
        """
        serverFactory = ClosingFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        serverFactory.port = tcpPort
        portNumber = tcpPort.getHost().port
        # Record every connector handed to the factory callbacks so the
        # identity assertion at the end can compare them.
        seenConnectors = []
        seenFailures = []
        clientFactory = ClientStartStopFactory()
        clientFactory.clientConnectionLost = (
            lambda connector, reason: (seenConnectors.append(connector),
                                       seenFailures.append(reason)))
        clientFactory.startedConnecting = seenConnectors.append
        connector = reactor.connectTCP(self.interface, portNumber,
                                       clientFactory)
        self.assertTrue(IConnector.providedBy(connector))
        dest = connector.getDestination()
        self.assertEqual(dest.type, "TCP")
        self.assertEqual(dest.host, self.interface)
        self.assertEqual(dest.port, portNumber)
        clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
        self.runReactor(reactor)
        # ClosingFactory drops the connection immediately, so the client side
        # observes a clean close.
        seenFailures[0].trap(ConnectionDone)
        self.assertEqual(seenConnectors, [connector, connector])
    def test_userFail(self):
        """
        Calling L{IConnector.stopConnecting} in C{Factory.startedConnecting}
        results in C{Factory.clientConnectionFailed} being called with
        L{error.UserError} as the reason.
        """
        serverFactory = MyServerFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        portNumber = tcpPort.getHost().port
        # Any unexpected exception raised by stopConnecting is captured here
        # and reported after the reactor has shut down.
        fatalErrors = []
        def startedConnecting(connector):
            try:
                connector.stopConnecting()
            except Exception:
                fatalErrors.append(Failure())
                reactor.stop()
        clientFactory = ClientStartStopFactory()
        clientFactory.startedConnecting = startedConnecting
        clientFactory.whenStopped.addBoth(lambda _: reactor.stop())
        reactor.callWhenRunning(lambda: reactor.connectTCP(self.interface,
                                                           portNumber,
                                                           clientFactory))
        self.runReactor(reactor)
        if fatalErrors:
            self.fail(fatalErrors[0].getTraceback())
        clientFactory.reason.trap(UserError)
        self.assertEqual(clientFactory.failed, 1)
    def test_reconnect(self):
        """
        Calling L{IConnector.connect} in C{Factory.clientConnectionLost} causes
        a new connection attempt to be made.
        """
        serverFactory = ClosingFactory()
        reactor = self.buildReactor()
        tcpPort = reactor.listenTCP(0, serverFactory, interface=self.interface)
        serverFactory.port = tcpPort
        portNumber = tcpPort.getHost().port
        clientFactory = MyClientFactory()
        def clientConnectionLost(connector, reason):
            # Immediately reconnect; the listening port is gone by then (the
            # server factory closes it), so this second attempt must fail.
            connector.connect()
        clientFactory.clientConnectionLost = clientConnectionLost
        reactor.connectTCP(self.interface, portNumber, clientFactory)
        protocolMadeAndClosed = []
        def reconnectFailed(ignored):
            p = clientFactory.protocol
            protocolMadeAndClosed.append((p.made, p.closed))
            reactor.stop()
        clientFactory.failDeferred.addCallback(reconnectFailed)
        self.runReactor(reactor)
        # The second attempt is refused, while the first connection was both
        # made and closed exactly once.
        clientFactory.reason.trap(ConnectionRefusedError)
        self.assertEqual(protocolMadeAndClosed, [(1, 1)])
class TCP4ConnectorTestsBuilder(TCPConnectorTestsBuilder):
    """
    IPv4 configuration of the L{IConnector} tests.
    """
    addressClass = IPv4Address
    family = socket.AF_INET
    interface = '127.0.0.1'
class TCP6ConnectorTestsBuilder(TCPConnectorTestsBuilder):
    """
    IPv6 configuration of the L{IConnector} tests.
    """
    addressClass = IPv6Address
    family = socket.AF_INET6
    if ipv6Skip:
        skip = ipv6Skip
    def setUp(self):
        # A link-local address is used so the test host needs no global IPv6
        # connectivity.
        self.interface = getLinkLocalIPv6Address()
def createTestSocket(test, addressFamily, socketType):
    """
    Create a socket which is automatically closed when the given test ends.

    @param test: the test to add cleanup to.

    @param addressFamily: an C{AF_*} constant

    @param socketType: a C{SOCK_*} constant.

    @return: a socket object.
    """
    newSocket = socket.socket(addressFamily, socketType)
    test.addCleanup(newSocket.close)
    return newSocket
class StreamTransportTestsMixin(LogObserverMixin):
    """
    Mixin defining tests which apply to any port/connection based transport.
    """
    def test_startedListeningLogMessage(self):
        """
        When a port starts, a message including a description of the associated
        factory is logged.
        """
        loggedMessages = self.observe()
        reactor = self.buildReactor()
        @implementer(ILoggingContext)
        class SomeFactory(ServerFactory):
            def logPrefix(self):
                return "Crazy Factory"
        factory = SomeFactory()
        p = self.getListeningPort(reactor, factory)
        expectedMessage = self.getExpectedStartListeningLogMessage(
            p, "Crazy Factory")
        self.assertEqual((expectedMessage,), loggedMessages[0]['message'])
    def test_connectionLostLogMsg(self):
        """
        When a connection is lost, an informative message should be logged
        (see L{getExpectedConnectionLostLogMsg}): an address identifying
        the port and the fact that it was closed.
        """
        loggedMessages = []
        def logConnectionLostMsg(eventDict):
            loggedMessages.append(log.textFromEventDict(eventDict))
        reactor = self.buildReactor()
        p = self.getListeningPort(reactor, ServerFactory())
        expectedMessage = self.getExpectedConnectionLostLogMsg(p)
        log.addObserver(logConnectionLostMsg)
        def stopReactor(ignored):
            log.removeObserver(logConnectionLostMsg)
            reactor.stop()
        def doStopListening():
            # Install the observer (again) just before the port shuts down so
            # the expected message is certainly captured.
            log.addObserver(logConnectionLostMsg)
            maybeDeferred(p.stopListening).addCallback(stopReactor)
        reactor.callWhenRunning(doStopListening)
        reactor.run()
        self.assertIn(expectedMessage, loggedMessages)
    def test_allNewStyle(self):
        """
        The L{IListeningPort} object is an instance of a class with no
        classic classes in its hierarchy.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory())
        self.assertFullyNewStyle(port)
class ListenTCPMixin(object):
    """
    Mixin which uses L{IReactorTCP.listenTCP} to hand out listening TCP ports.
    """
    def getListeningPort(self, reactor, factory, port=0, interface=''):
        """
        Get a TCP port from a reactor.
        """
        listeningPort = reactor.listenTCP(port, factory, interface=interface)
        return listeningPort
class SocketTCPMixin(object):
    """
    Mixin which uses L{IReactorSocket.adoptStreamPort} to hand out listening TCP
    ports.
    """
    def getListeningPort(self, reactor, factory, port=0, interface=''):
        """
        Get a TCP port from a reactor, wrapping an already-initialized file
        descriptor.
        """
        if IReactorSocket.providedBy(reactor):
            if ':' in interface:
                # A colon in the interface marks it as an IPv6 address;
                # getaddrinfo produces the full sockaddr tuple to bind to.
                domain = socket.AF_INET6
                address = socket.getaddrinfo(interface, port)[0][4]
            else:
                domain = socket.AF_INET
                address = (interface, port)
            # Create, bind, and start the listening socket here so the
            # reactor only has to adopt the file descriptor.
            portSock = socket.socket(domain)
            portSock.bind(address)
            portSock.listen(3)
            portSock.setblocking(False)
            try:
                return reactor.adoptStreamPort(
                    portSock.fileno(), portSock.family, factory)
            finally:
                # The socket should still be open; fileno will raise if it is
                # not.
                portSock.fileno()
                # Now clean it up, because the rest of the test does not need
                # it.
                portSock.close()
        else:
            raise SkipTest("Reactor does not provide IReactorSocket")
class TCPPortTestsMixin(object):
    """
    Tests for L{IReactorTCP.listenTCP}
    """
    requiredInterfaces = (IReactorTCP,)
    def getExpectedStartListeningLogMessage(self, port, factory):
        """
        Get the message expected to be logged when a TCP port starts listening.
        """
        return "%s starting on %d" % (
            factory, port.getHost().port)
    def getExpectedConnectionLostLogMsg(self, port):
        """
        Get the expected connection lost message for a TCP port.
        """
        return "(TCP Port %s Closed)" % (port.getHost().port,)
    def test_portGetHostOnIPv4(self):
        """
        When no interface is passed to L{IReactorTCP.listenTCP}, the returned
        listening port listens on an IPv4 address.
        """
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory())
        address = port.getHost()
        self.assertIsInstance(address, IPv4Address)
    def test_portGetHostOnIPv6(self):
        """
        When listening on an IPv6 address, L{IListeningPort.getHost} returns
        an L{IPv6Address} with C{host} and C{port} attributes reflecting the
        address the port is bound to.
        """
        reactor = self.buildReactor()
        host, portNumber = findFreePort(
            family=socket.AF_INET6, interface='::1')[:2]
        port = self.getListeningPort(
            reactor, ServerFactory(), portNumber, host)
        address = port.getHost()
        self.assertIsInstance(address, IPv6Address)
        self.assertEqual('::1', address.host)
        self.assertEqual(portNumber, address.port)
    if ipv6Skip:
        test_portGetHostOnIPv6.skip = ipv6Skip
    def test_portGetHostOnIPv6ScopeID(self):
        """
        When a link-local IPv6 address including a scope identifier is passed as
        the C{interface} argument to L{IReactorTCP.listenTCP}, the resulting
        L{IListeningPort} reports its address as an L{IPv6Address} with a host
        value that includes the scope identifier.
        """
        linkLocal = getLinkLocalIPv6Address()
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, ServerFactory(), 0, linkLocal)
        address = port.getHost()
        self.assertIsInstance(address, IPv6Address)
        self.assertEqual(linkLocal, address.host)
    if ipv6Skip:
        test_portGetHostOnIPv6ScopeID.skip = ipv6Skip
    def _buildProtocolAddressTest(self, client, interface):
        """
        Connect C{client} to a server listening on C{interface} started with
        L{IReactorTCP.listenTCP} and return the address passed to the factory's
        C{buildProtocol} method.
        @param client: A C{SOCK_STREAM} L{socket.socket} created with an address
            family such that it will be able to connect to a server listening on
            C{interface}.
        @param interface: A C{str} giving an address for a server to listen on.
            This should almost certainly be the loopback address for some
            address family supported by L{IReactorTCP.listenTCP}.
        @return: Whatever object, probably an L{IAddress} provider, is passed to
            a server factory's C{buildProtocol} method when C{client}
            establishes a connection.
        """
        class ObserveAddress(ServerFactory):
            def buildProtocol(self, address):
                reactor.stop()
                self.observedAddress = address
                return Protocol()
        factory = ObserveAddress()
        reactor = self.buildReactor()
        port = self.getListeningPort(reactor, factory, 0, interface)
        client.setblocking(False)
        try:
            connect(client, (port.getHost().host, port.getHost().port))
        except socket.error as e:
            # A non-blocking connect is expected to fail immediately with an
            # "in progress" error; anything else is a real failure.
            errnum, message = e.args
            self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
        self.runReactor(reactor)
        return factory.observedAddress
    def test_buildProtocolIPv4Address(self):
        """
        When a connection is accepted over IPv4, an L{IPv4Address} is passed
        to the factory's C{buildProtocol} method giving the peer's address.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv4Address('TCP', *client.getsockname()), observedAddress)
    def test_buildProtocolIPv6Address(self):
        """
        When a connection is accepted to an IPv6 address, an L{IPv6Address} is
        passed to the factory's C{buildProtocol} method giving the peer's
        address.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        # getsockname() on an IPv6 socket returns a 4-tuple; only host and
        # port are relevant to the address comparison.
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
    if ipv6Skip:
        test_buildProtocolIPv6Address.skip = ipv6Skip
    def test_buildProtocolIPv6AddressScopeID(self):
        """
        When a connection is accepted to a link-local IPv6 address, an
        L{IPv6Address} is passed to the factory's C{buildProtocol} method
        giving the peer's address, including a scope identifier.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        observedAddress = self._buildProtocolAddressTest(client, interface)
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), observedAddress)
    if ipv6Skip:
        test_buildProtocolIPv6AddressScopeID.skip = ipv6Skip
    def _serverGetConnectionAddressTest(self, client, interface, which):
        """
        Connect C{client} to a server listening on C{interface} started with
        L{IReactorTCP.listenTCP} and return the address returned by one of the
        server transport's address lookup methods, C{getHost} or C{getPeer}.
        @param client: A C{SOCK_STREAM} L{socket.socket} created with an address
            family such that it will be able to connect to a server listening on
            C{interface}.
        @param interface: A C{str} giving an address for a server to listen on.
            This should almost certainly be the loopback address for some
            address family supported by L{IReactorTCP.listenTCP}.
        @param which: A C{str} equal to either C{"getHost"} or C{"getPeer"}
            determining which address will be returned.
        @return: Whatever object, probably an L{IAddress} provider, is returned
            from the method indicated by C{which}.
        """
        class ObserveAddress(Protocol):
            def makeConnection(self, transport):
                reactor.stop()
                self.factory.address = getattr(transport, which)()
        reactor = self.buildReactor()
        factory = ServerFactory()
        factory.protocol = ObserveAddress
        port = self.getListeningPort(reactor, factory, 0, interface)
        client.setblocking(False)
        try:
            connect(client, (port.getHost().host, port.getHost().port))
        except socket.error as e:
            # A non-blocking connect is expected to fail immediately with an
            # "in progress" error; anything else is a real failure.
            errnum, message = e.args
            self.assertIn(errnum, (errno.EINPROGRESS, errno.EWOULDBLOCK))
        self.runReactor(reactor)
        return factory.address
    def test_serverGetHostOnIPv4(self):
        """
        When a connection is accepted over IPv4, the server
        L{ITransport.getHost} method returns an L{IPv4Address} giving the
        address on which the server accepted the connection.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        hostAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getHost')
        self.assertEqual(
            IPv4Address('TCP', *client.getpeername()), hostAddress)
    def test_serverGetHostOnIPv6(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getHost} method returns an L{IPv6Address} giving the
        address on which the server accepted the connection.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        hostAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getHost')
        self.assertEqual(
            IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
    if ipv6Skip:
        test_serverGetHostOnIPv6.skip = ipv6Skip
    def test_serverGetHostOnIPv6ScopeID(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getHost} method returns an L{IPv6Address} giving the
        address on which the server accepted the connection, including the scope
        identifier.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        hostAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getHost')
        self.assertEqual(
            IPv6Address('TCP', *client.getpeername()[:2]), hostAddress)
    if ipv6Skip:
        test_serverGetHostOnIPv6ScopeID.skip = ipv6Skip
    def test_serverGetPeerOnIPv4(self):
        """
        When a connection is accepted over IPv4, the server
        L{ITransport.getPeer} method returns an L{IPv4Address} giving the
        address of the remote end of the connection.
        """
        interface = '127.0.0.1'
        client = createTestSocket(self, socket.AF_INET, socket.SOCK_STREAM)
        peerAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getPeer')
        self.assertEqual(
            IPv4Address('TCP', *client.getsockname()), peerAddress)
    def test_serverGetPeerOnIPv6(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getPeer} method returns an L{IPv6Address} giving the
        address on the remote end of the connection.
        """
        interface = '::1'
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        peerAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getPeer')
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
    if ipv6Skip:
        test_serverGetPeerOnIPv6.skip = ipv6Skip
    def test_serverGetPeerOnIPv6ScopeID(self):
        """
        When a connection is accepted over IPv6, the server
        L{ITransport.getPeer} method returns an L{IPv6Address} giving the
        address on the remote end of the connection, including the scope
        identifier.
        """
        interface = getLinkLocalIPv6Address()
        client = createTestSocket(self, socket.AF_INET6, socket.SOCK_STREAM)
        peerAddress = self._serverGetConnectionAddressTest(
            client, interface, 'getPeer')
        self.assertEqual(
            IPv6Address('TCP', *client.getsockname()[:2]), peerAddress)
    if ipv6Skip:
        test_serverGetPeerOnIPv6ScopeID.skip = ipv6Skip
class TCPPortTestsBuilder(ReactorBuilder, ListenTCPMixin, TCPPortTestsMixin,
                          ObjectModelIntegrationMixin,
                          StreamTransportTestsMixin):
    """
    Tests for TCP ports created directly with L{IReactorTCP.listenTCP}.
    """
    pass
class TCPFDPortTestsBuilder(ReactorBuilder, SocketTCPMixin, TCPPortTestsMixin,
                            ObjectModelIntegrationMixin,
                            StreamTransportTestsMixin):
    """
    Tests for TCP ports created by adopting an existing listening file
    descriptor via L{IReactorSocket.adoptStreamPort}.
    """
    pass
class StopStartReadingProtocol(Protocol):
    """
    Protocol that pauses and resumes the transport a few times
    """
    def connectionMade(self):
        # Accumulates everything received; compared against the expected
        # payload in dataReceived.
        self.data = b''
        self.pauseResumeProducing(3)
    def pauseResumeProducing(self, counter):
        """
        Toggle transport read state, then count down.
        """
        self.transport.pauseProducing()
        self.transport.resumeProducing()
        if counter:
            self.factory.reactor.callLater(0,
                self.pauseResumeProducing, counter - 1)
        else:
            # Done toggling; let the factory's C{ready} Deferred know the
            # connection is usable.
            self.factory.reactor.callLater(0,
                self.factory.ready.callback, self)
    def dataReceived(self, data):
        log.msg('got data', len(data))
        self.data += data
        if len(self.data) == 4*4096:
            # The full expected payload arrived; hand it to the factory's
            # C{stop} Deferred for verification.
            self.factory.stop.callback(self.data)
def oneTransportTest(testMethod):
    """
    Decorate a L{ReactorBuilder} test function which tests one reactor and one
    connected transport.  Run that test method in the context of
    C{connectionMade}, and immediately drop the connection (and end the test)
    when that completes.
    @param testMethod: A unit test method on a L{ReactorBuilder} test suite;
        taking two additional parameters; a C{reactor} as built by the
        L{ReactorBuilder}, and an L{ITCPTransport} provider.
    @type testMethod: 3-argument C{function}
    @return: a no-argument test method.
    @rtype: 1-argument C{function}
    """
    @wraps(testMethod)
    def actualTestMethod(builder):
        other = ConnectableProtocol()
        class ServerProtocol(ConnectableProtocol):
            def connectionMade(self):
                # Run the decorated test body now that a live transport
                # exists, then tear down both sides regardless of outcome so
                # the reactor can stop.
                try:
                    testMethod(builder, self.reactor, self.transport)
                finally:
                    if self.transport is not None:
                        self.transport.loseConnection()
                    if other.transport is not None:
                        other.transport.loseConnection()
        serverProtocol = ServerProtocol()
        runProtocolsWithReactor(builder, serverProtocol, other, TCPCreator())
    return actualTestMethod
def assertReading(testCase, reactor, transport):
    """
    Use the given test to assert that the given transport is actively reading
    in the given reactor.

    @note: Maintainers; for more information on why this is a function rather
        than a method on a test case, see U{this document on how we structure
        test tools
        <http://twistedmatrix.com/trac/wiki/Design/KeepTestToolsOutOfFixtures>}

    @param testCase: a test case to perform the assertion upon.
    @type testCase: L{TestCase}

    @param reactor: A reactor, possibly one providing L{IReactorFDSet}, or an
        IOCP reactor.

    @param transport: An L{ITCPTransport}
    """
    if not IReactorFDSet.providedBy(reactor):
        # An IOCP reactor has no reader set; check its handle bookkeeping and
        # the transport's own reading flag instead.
        testCase.assertIn(transport, reactor.handles)
        testCase.assertTrue(transport.reading)
    else:
        testCase.assertIn(transport, reactor.getReaders())
def assertNotReading(testCase, reactor, transport):
    """
    Use the given test to assert that the given transport is I{not} actively
    reading in the given reactor.

    @note: Maintainers; for more information on why this is a function rather
        than a method on a test case, see U{this document on how we structure
        test tools
        <http://twistedmatrix.com/trac/wiki/Design/KeepTestToolsOutOfFixtures>}

    @param testCase: a test case to perform the assertion upon.
    @type testCase: L{TestCase}

    @param reactor: A reactor, possibly one providing L{IReactorFDSet}, or an
        IOCP reactor.

    @param transport: An L{ITCPTransport}
    """
    if not IReactorFDSet.providedBy(reactor):
        # An IOCP reactor has no reader set; rely on the transport's own
        # reading flag.
        testCase.assertFalse(transport.reading)
    else:
        testCase.assertNotIn(transport, reactor.getReaders())
class TCPConnectionTestsBuilder(ReactorBuilder):
    """
    Builder defining tests relating to L{twisted.internet.tcp.Connection}.
    """
    requiredInterfaces = (IReactorTCP,)
    def test_stopStartReading(self):
        """
        This test verifies transport socket read state after multiple
        pause/resumeProducing calls.
        """
        sf = ServerFactory()
        reactor = sf.reactor = self.buildReactor()
        skippedReactors = ["Glib2Reactor", "Gtk2Reactor"]
        reactorClassName = reactor.__class__.__name__
        if reactorClassName in skippedReactors and platform.isWindows():
            raise SkipTest(
                "This test is broken on gtk/glib under Windows.")
        sf.protocol = StopStartReadingProtocol
        sf.ready = Deferred()
        sf.stop = Deferred()
        p = reactor.listenTCP(0, sf)
        port = p.getHost().port
        def proceed(protos, port):
            """
            Send several IOCPReactor's buffers' worth of data.
            """
            self.assertTrue(protos[0])
            self.assertTrue(protos[1])
            # DeferredList results are (success, value) pairs; extract the
            # protocol instances.
            protos = protos[0][1], protos[1][1]
            protos[0].transport.write(b'x' * (2 * 4096) + b'y' * (2 * 4096))
            return (sf.stop.addCallback(cleanup, protos, port)
                    .addCallback(lambda ign: reactor.stop()))
        def cleanup(data, protos, port):
            """
            Make sure IOCPReactor didn't start several WSARecv operations
            that clobbered each other's results.
            """
            self.assertEqual(data, b'x'*(2*4096) + b'y'*(2*4096),
                             'did not get the right data')
            return DeferredList([
                maybeDeferred(protos[0].transport.loseConnection),
                maybeDeferred(protos[1].transport.loseConnection),
                maybeDeferred(port.stopListening)])
        cc = TCP4ClientEndpoint(reactor, '127.0.0.1', port)
        cf = ClientFactory()
        cf.protocol = Protocol
        d = DeferredList([cc.connect(cf), sf.ready]).addCallback(proceed, p)
        d.addErrback(log.err)
        self.runReactor(reactor)
    @oneTransportTest
    def test_resumeProducing(self, reactor, server):
        """
        When a L{Server} is connected, its C{resumeProducing} method adds it as
        a reader to the reactor.
        """
        server.pauseProducing()
        assertNotReading(self, reactor, server)
        server.resumeProducing()
        assertReading(self, reactor, server)
    @oneTransportTest
    def test_resumeProducingWhileDisconnecting(self, reactor, server):
        """
        When a L{Server} has already started disconnecting via
        C{loseConnection}, its C{resumeProducing} method does not add it as a
        reader to its reactor.
        """
        server.loseConnection()
        server.resumeProducing()
        assertNotReading(self, reactor, server)
    @oneTransportTest
    def test_resumeProducingWhileDisconnected(self, reactor, server):
        """
        When a L{Server} has already lost its connection, its
        C{resumeProducing} method does not add it as a reader to its reactor.
        """
        server.connectionLost(Failure(Exception("dummy")))
        assertNotReading(self, reactor, server)
        server.resumeProducing()
        assertNotReading(self, reactor, server)
    def test_connectionLostAfterPausedTransport(self):
        """
        Alice connects to Bob.  Alice writes some bytes and then shuts down the
        connection.  Bob receives the bytes from the connection and then pauses
        the transport object.  Shortly afterwards Bob resumes the transport
        object.  At that point, Bob is notified that the connection has been
        closed.
        This is no problem for most reactors.  The underlying event notification
        API will probably just remind them that the connection has been closed.
        It is a little tricky for win32eventreactor (MsgWaitForMultipleObjects).
        MsgWaitForMultipleObjects will only deliver the close notification once.
        The reactor needs to remember that notification until Bob resumes the
        transport.
        """
        class Pauser(ConnectableProtocol):
            def __init__(self):
                # Ordered record of what happened; asserted on below.
                self.events = []
            def dataReceived(self, bytes):
                self.events.append("paused")
                self.transport.pauseProducing()
                self.reactor.callLater(0, self.resume)
            def resume(self):
                self.events.append("resumed")
                self.transport.resumeProducing()
            def connectionLost(self, reason):
                # This is the event you have been waiting for.
                self.events.append("lost")
                ConnectableProtocol.connectionLost(self, reason)
        class Client(ConnectableProtocol):
            def connectionMade(self):
                self.transport.write(b"some bytes for you")
                self.transport.loseConnection()
        pauser = Pauser()
        runProtocolsWithReactor(self, pauser, Client(), TCPCreator())
        self.assertEqual(pauser.events, ["paused", "resumed", "lost"])
    def test_doubleHalfClose(self):
        """
        If one side half-closes its connection, and then the other side of the
        connection calls C{loseWriteConnection}, and then C{loseConnection} in
        {writeConnectionLost}, the connection is closed correctly.
        This rather obscure case used to fail (see ticket #3037).
        """
        @implementer(IHalfCloseableProtocol)
        class ListenerProtocol(ConnectableProtocol):
            def readConnectionLost(self):
                self.transport.loseWriteConnection()
            def writeConnectionLost(self):
                self.transport.loseConnection()
        class Client(ConnectableProtocol):
            def connectionMade(self):
                self.transport.loseConnection()
        # If test fails, reactor won't stop and we'll hit timeout:
        runProtocolsWithReactor(
            self, ListenerProtocol(), Client(), TCPCreator())
class WriteSequenceTestsMixin(object):
    """
    Test for L{twisted.internet.abstract.FileDescriptor.writeSequence}.
    """
    requiredInterfaces = (IReactorTCP,)
    def setWriteBufferSize(self, transport, value):
        """
        Set the write buffer size for the given transport, managing possible
        differences (ie, IOCP). Bug #4322 should remove the need of that hack.
        """
        if getattr(transport, "writeBufferSize", None) is not None:
            transport.writeBufferSize = value
        else:
            transport.bufferSize = value
    # NOTE(review): method name contains a typo ("Sequece"); renaming it is
    # deferred since external references to the test by name may exist.
    def test_writeSequeceWithoutWrite(self):
        """
        C{writeSequence} sends the data even if C{write} hasn't been called.
        """
        def connected(protocols):
            client, server, port = protocols
            def dataReceived(data):
                log.msg("data received: %r" % data)
                self.assertEqual(data, b"Some sequence splitted")
                client.transport.loseConnection()
            server.dataReceived = dataReceived
            client.transport.writeSequence([b"Some ", b"sequence ", b"splitted"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_writeSequenceWithUnicodeRaisesException(self):
        """
        C{writeSequence} with an element in the sequence of type unicode raises
        C{TypeError}.
        """
        def connected(protocols):
            client, server, port = protocols
            exc = self.assertRaises(
                TypeError,
                server.transport.writeSequence, [u"Unicode is not kosher"])
            self.assertEqual(str(exc), "Data must not be unicode")
            server.transport.loseConnection()
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_streamingProducer(self):
        """
        C{writeSequence} pauses its streaming producer if too much data is
        buffered, and then resumes it.
        """
        @implementer(IPushProducer)
        class SaveActionProducer(object):
            client = None
            server = None
            def __init__(self):
                # Ordered record of producer callbacks, asserted on below.
                self.actions = []
            def pauseProducing(self):
                self.actions.append("pause")
            def resumeProducing(self):
                self.actions.append("resume")
                # Unregister the producer so the connection can close
                self.client.transport.unregisterProducer()
                # This is why the code below waits for the server connection
                # first - so we have it to close here.  We close the server
                # side because win32eventreactor cannot reliably observe us
                # closing the client side (#5285).
                self.server.transport.loseConnection()
            def stopProducing(self):
                self.actions.append("stop")
        producer = SaveActionProducer()
        def connected(protocols):
            client, server = protocols[:2]
            producer.client = client
            producer.server = server
            # Register a streaming producer and verify that it gets paused
            # after it writes more than the local send buffer can hold.
            client.transport.registerProducer(producer, True)
            self.assertEqual(producer.actions, [])
            self.setWriteBufferSize(client.transport, 500)
            client.transport.writeSequence([b"x" * 50] * 20)
            self.assertEqual(producer.actions, ["pause"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
        # After the send buffer gets a chance to empty out a bit, the producer
        # should be resumed.
        self.assertEqual(producer.actions, ["pause", "resume"])
    def test_nonStreamingProducer(self):
        """
        C{writeSequence} pauses its producer if too much data is buffered only
        if this is a streaming producer.
        """
        test = self
        @implementer(IPullProducer)
        class SaveActionProducer(object):
            client = None
            def __init__(self):
                self.actions = []
            def resumeProducing(self):
                self.actions.append("resume")
                if self.actions.count("resume") == 2:
                    self.client.transport.stopConsuming()
                else:
                    test.setWriteBufferSize(self.client.transport, 500)
                    self.client.transport.writeSequence([b"x" * 50] * 20)
            def stopProducing(self):
                self.actions.append("stop")
        producer = SaveActionProducer()
        def connected(protocols):
            client = protocols[0]
            producer.client = client
            # Register a non-streaming producer and verify that it is resumed
            # immediately.
            client.transport.registerProducer(producer, False)
            self.assertEqual(producer.actions, ["resume"])
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, "127.0.0.1",
                                             socket.AF_INET)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
        # After the local send buffer empties out, the producer should be
        # resumed again.
        self.assertEqual(producer.actions, ["resume", "resume"])
class TCPTransportServerAddressTestMixin(object):
    """
    Test mixin for TCP server address building and log prefix.
    """
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Helper method returning a L{Deferred} firing with a tuple of a client
        protocol, a server protocol, and a running TCP port.
        """
        raise NotImplementedError()
    def _testServerAddress(self, interface, addressFamily, addressClass):
        """
        Helper method to test TCP server addresses on either IPv4 or IPv6.

        @param interface: The loopback interface to listen on and connect to.
        @param addressFamily: The C{AF_*} constant matching C{interface}.
        @param addressClass: The address class (L{IPv4Address} or
            L{IPv6Address}) the server's recorded peer address is expected to
            be an instance of.
        """
        def connected(protocols):
            client, server, port = protocols
            try:
                self.assertEqual(
                    "<AccumulatingProtocol #%s on %s>" %
                    (server.transport.sessionno, port.getHost().port),
                    str(server.transport))
                self.assertEqual(
                    "AccumulatingProtocol,%s,%s" %
                    (server.transport.sessionno, interface),
                    server.transport.logstr)
                [peerAddress] = server.factory.peerAddresses
                self.assertIsInstance(peerAddress, addressClass)
                self.assertEqual('TCP', peerAddress.type)
                self.assertEqual(interface, peerAddress.host)
            finally:
                # Be certain to drop the connection so the test completes.
                server.transport.loseConnection()
        reactor = self.buildReactor()
        d = self.getConnectedClientAndServer(reactor, interface, addressFamily)
        d.addCallback(connected)
        d.addErrback(log.err)
        self.runReactor(reactor)
    def test_serverAddressTCP4(self):
        """
        L{Server} instances have a string representation indicating on which
        port they're running, and the connected address is stored on the
        C{peerAddresses} attribute of the factory.
        """
        return self._testServerAddress("127.0.0.1", socket.AF_INET,
                                       IPv4Address)
    def test_serverAddressTCP6(self):
        """
        IPv6 L{Server} instances have a string representation indicating on
        which port they're running, and the connected address is stored on the
        C{peerAddresses} attribute of the factory.
        """
        return self._testServerAddress(getLinkLocalIPv6Address(),
                                       socket.AF_INET6, IPv6Address)
    if ipv6Skip:
        test_serverAddressTCP6.skip = ipv6Skip
class TCPTransportTestsBuilder(TCPTransportServerAddressTestMixin,
                               WriteSequenceTestsMixin, ReactorBuilder):
    """
    Test standard L{ITCPTransport}s built with C{listenTCP} and C{connectTCP}.
    """
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Return a L{Deferred} firing with a L{MyClientFactory} and
        L{MyServerFactory} connected pair, and the listening C{Port}.
        """
        serverFactory = MyServerFactory()
        clientFactory = MyClientFactory()
        # Both factories record connection setup and teardown on Deferreds.
        for factory in [serverFactory, clientFactory]:
            factory.protocolConnectionMade = Deferred()
            factory.protocolConnectionLost = Deferred()
        port = reactor.listenTCP(0, serverFactory, interface=interface)
        def stopReactor(passthrough):
            # Once both sides have disconnected, shut the reactor down,
            # handing the result (or failure) through untouched.
            reactor.stop()
            return passthrough
        gatherResults([clientFactory.protocolConnectionLost,
                       serverFactory.protocolConnectionLost]).addBoth(
                           stopReactor)
        result = Deferred()
        def fireConnected(protocols):
            clientProto, serverProto = protocols
            log.msg("client connected %s" % clientProto)
            log.msg("server connected %s" % serverProto)
            result.callback((clientProto, serverProto, port))
        gatherResults([clientFactory.protocolConnectionMade,
                       serverFactory.protocolConnectionMade]).addCallback(
                           fireConnected)
        reactor.connectTCP(interface, port.getHost().port, clientFactory)
        return result
class AdoptStreamConnectionTestsBuilder(TCPTransportServerAddressTestMixin,
                                        WriteSequenceTestsMixin,
                                        ReactorBuilder):
    """
    Test server transports built using C{adoptStreamConnection}.
    """
    requiredInterfaces = (IReactorFDSet, IReactorSocket)
    def getConnectedClientAndServer(self, reactor, interface, addressFamily):
        """
        Return a L{Deferred} firing with a L{MyClientFactory} and
        L{MyServerFactory} connected pair, and the listening C{Port}. The
        particularity is that the server protocol has been obtained after doing
        a C{adoptStreamConnection} against the original server connection.
        """
        initialServerFactory = MyServerFactory()
        initialServerFactory.protocolConnectionMade = Deferred()
        adoptedServerFactory = MyServerFactory()
        adoptedServerFactory.protocolConnectionMade = Deferred()
        adoptedServerFactory.protocolConnectionLost = Deferred()
        clientFactory = MyClientFactory()
        clientFactory.protocolConnectionMade = Deferred()
        clientFactory.protocolConnectionLost = Deferred()
        port = reactor.listenTCP(0, initialServerFactory, interface=interface)
        def firstServerConnected(proto):
            # Detach the accepted connection from the reactor, then re-adopt
            # its file descriptor under the second server factory.
            reactor.removeReader(proto.transport)
            reactor.removeWriter(proto.transport)
            reactor.adoptStreamConnection(
                proto.transport.fileno(), addressFamily, adoptedServerFactory)
        initialServerFactory.protocolConnectionMade.addCallback(
            firstServerConnected)
        def stopReactor(passthrough):
            # Stop the reactor (if still running), handing the result or
            # failure through untouched.
            if reactor.running:
                reactor.stop()
            return passthrough
        gatherResults([clientFactory.protocolConnectionLost,
                       adoptedServerFactory.protocolConnectionLost]).addBoth(
                           stopReactor)
        result = Deferred()
        result.addErrback(stopReactor)
        def fireConnected(protocols):
            clientProto, serverProto = protocols
            log.msg("client connected %s" % clientProto)
            log.msg("server connected %s" % serverProto)
            result.callback((clientProto, serverProto, port))
        gatherResults([clientFactory.protocolConnectionMade,
                       adoptedServerFactory.protocolConnectionMade
                       ]).addCallback(fireConnected)
        reactor.connectTCP(interface, port.getHost().port, clientFactory)
        return result
# Generate concrete TestCase classes from each reactor builder so the test
# runner discovers one suite per installed reactor implementation.
globals().update(TCP4ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ClientTestsBuilder.makeTestCaseClasses())
globals().update(TCPPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPFDPortTestsBuilder.makeTestCaseClasses())
globals().update(TCPConnectionTestsBuilder.makeTestCaseClasses())
globals().update(TCP4ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCP6ConnectorTestsBuilder.makeTestCaseClasses())
globals().update(TCPTransportTestsBuilder.makeTestCaseClasses())
globals().update(AdoptStreamConnectionTestsBuilder.makeTestCaseClasses())
class ServerAbortsTwice(ConnectableProtocol):
    """
    Call abortConnection() twice.
    """
    def dataReceived(self, data):
        # The second abortConnection() must be a harmless no-op.
        self.transport.abortConnection()
        self.transport.abortConnection()
class ServerAbortsThenLoses(ConnectableProtocol):
    """
    Call abortConnection() followed by loseConnection().
    """
    def dataReceived(self, data):
        # loseConnection() after an abort must be a harmless no-op.
        self.transport.abortConnection()
        self.transport.loseConnection()
class AbortServerWritingProtocol(ConnectableProtocol):
    """
    Protocol that writes data upon connection.
    """
    def connectionMade(self):
        """
        Tell the client that the connection is set up and it's time to abort.
        """
        self.transport.write(b"ready")
class ReadAbortServerProtocol(AbortServerWritingProtocol):
    """
    Server that should never receive any data, except 'X's which are written
    by the other side of the connection before abortConnection, and so might
    possibly arrive.
    """
    def dataReceived(self, data):
        # Any byte other than 'X' survives the replace and is an error.
        if data.replace(b'X', b''):
            raise Exception("Unexpectedly received data.")
class NoReadServer(ConnectableProtocol):
    """
    Stop reading immediately on connection.
    This simulates a lost connection that will cause the other side to time
    out, and therefore call abortConnection().
    """
    def connectionMade(self):
        self.transport.stopReading()
class EventualNoReadServer(ConnectableProtocol):
    """
    Like NoReadServer, except we wait until some bytes have been delivered
    before stopping reading. This means TLS handshake has finished, where
    applicable.
    """
    # Whether any bytes have arrived yet.
    gotData = False
    # Whether we have already stopped reading.
    stoppedReading = False
    def dataReceived(self, data):
        if not self.gotData:
            self.gotData = True
            # Register as a non-streaming (pull) producer; the transport will
            # call resumeProducing() once it wants more data.
            self.transport.registerProducer(self, False)
            self.transport.write(b"hello")
    def resumeProducing(self):
        if self.stoppedReading:
            return
        self.stoppedReading = True
        # We've written out the data:
        self.transport.stopReading()
    def pauseProducing(self):
        pass
    def stopProducing(self):
        pass
class BaseAbortingClient(ConnectableProtocol):
    """
    Base class for abort-testing clients.
    """
    # True only while one of our reactor-invoked methods is on the stack;
    # used to detect re-entrant connectionLost calls.
    inReactorMethod = False
    def connectionLost(self, reason):
        if self.inReactorMethod:
            raise RuntimeError("BUG: connectionLost was called re-entrantly!")
        ConnectableProtocol.connectionLost(self, reason)
class WritingButNotAbortingClient(BaseAbortingClient):
    """
    Write data, but don't abort.
    """
    def connectionMade(self):
        self.transport.write(b"hello")
class AbortingClient(BaseAbortingClient):
    """
    Call abortConnection() after writing some data.
    """
    def dataReceived(self, data):
        """
        Some data was received, so the connection is set up.
        """
        self.inReactorMethod = True
        self.writeAndAbort()
        self.inReactorMethod = False
    def writeAndAbort(self):
        # X is written before abortConnection, and so there is a chance it
        # might arrive. Y is written after, and so no Ys should ever be
        # delivered:
        self.transport.write(b"X" * 10000)
        self.transport.abortConnection()
        self.transport.write(b"Y" * 10000)
class AbortingTwiceClient(AbortingClient):
    """
    Call abortConnection() twice, after writing some data.
    """
    def writeAndAbort(self):
        AbortingClient.writeAndAbort(self)
        # A second abort must not blow up.
        self.transport.abortConnection()
class AbortingThenLosingClient(AbortingClient):
    """
    Call abortConnection() and then loseConnection().
    """
    def writeAndAbort(self):
        AbortingClient.writeAndAbort(self)
        # loseConnection() after the abort must not blow up.
        self.transport.loseConnection()
class ProducerAbortingClient(ConnectableProtocol):
    """
    Call abortConnection from doWrite, via resumeProducing.
    """
    # NOTE(review): the default is True, unlike BaseAbortingClient; it is
    # reset to False by the first resumeProducing() call -- confirm this
    # asymmetry is intentional.
    inReactorMethod = True
    producerStopped = False
    def write(self):
        self.transport.write(b"lalala" * 127000)
        # Registering a pull producer triggers an immediate resumeProducing();
        # the flag lets resumeProducing tell that re-entrant call apart.
        self.inRegisterProducer = True
        self.transport.registerProducer(self, False)
        self.inRegisterProducer = False
    def connectionMade(self):
        self.write()
    def resumeProducing(self):
        self.inReactorMethod = True
        if not self.inRegisterProducer:
            self.transport.abortConnection()
        self.inReactorMethod = False
    def stopProducing(self):
        self.producerStopped = True
    def connectionLost(self, reason):
        if not self.producerStopped:
            raise RuntimeError("BUG: stopProducing() was never called.")
        if self.inReactorMethod:
            raise RuntimeError("BUG: connectionLost called re-entrantly!")
        ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClient(ConnectableProtocol):
    """
    Call abortConnection() when the other side has stopped reading.
    In particular, we want to call abortConnection() only once our local
    socket hits a state where it is no longer writeable. This helps emulate
    the most common use case for abortConnection(), closing a connection after
    a timeout, with write buffers being full.
    Since it's very difficult to know when this actually happens, we just
    write a lot of data, and assume at that point no more writes will happen.
    """
    paused = False
    extraWrites = 0
    inReactorMethod = False
    def connectionMade(self):
        self.write()
    def write(self):
        """
        Write large amount to transport, then wait for a while for buffers to
        fill up.
        """
        # Streaming (push) producer: the transport calls pauseProducing()
        # when its buffers fill up.
        self.transport.registerProducer(self, True)
        for i in range(100):
            self.transport.write(b"1234567890" * 32000)
    def resumeProducing(self):
        self.paused = False
    def stopProducing(self):
        pass
    def pauseProducing(self):
        """
        Called when local buffer fills up.
        The goal is to hit the point where the local file descriptor is not
        writeable (or the moral equivalent). The fact that pauseProducing has
        been called is not sufficient, since that can happen when Twisted's
        buffers fill up but OS hasn't gotten any writes yet. We want to be as
        close as possible to every buffer (including OS buffers) being full.
        So, we wait a bit more after this for Twisted to write out a few
        chunks, then abortConnection.
        """
        if self.paused:
            return
        self.paused = True
        # The amount we wait is arbitrary, we just want to make sure some
        # writes have happened and outgoing OS buffers filled up -- see
        # http://twistedmatrix.com/trac/ticket/5303 for details:
        self.reactor.callLater(0.01, self.doAbort)
    def doAbort(self):
        """
        Abort the connection; connectionLost must not fire re-entrantly.
        """
        if not self.paused:
            # Fixed message wording ("a this point" -> "at this point").
            log.err(RuntimeError("BUG: We should be paused at this point."))
        self.inReactorMethod = True
        self.transport.abortConnection()
        self.inReactorMethod = False
    def connectionLost(self, reason):
        # Tell server to start reading again so it knows to go away:
        self.otherProtocol.transport.startReading()
        ConnectableProtocol.connectionLost(self, reason)
class StreamingProducerClientLater(StreamingProducerClient):
    """
    Call abortConnection() from dataReceived, after bytes have been
    exchanged.
    """
    def connectionMade(self):
        # Defer the big write() until the peer has answered, so that a TLS
        # handshake (when applicable) has already completed.
        self.transport.write(b"hello")
        self.gotData = False
    def dataReceived(self, data):
        if not self.gotData:
            self.gotData = True
            self.write()
class ProducerAbortingClientLater(ProducerAbortingClient):
    """
    Call abortConnection from doWrite, via resumeProducing.
    Try to do so after some bytes have already been exchanged, so we
    don't interrupt SSL handshake.
    """
    def connectionMade(self):
        # Override base class connectionMade().
        pass
    def dataReceived(self, data):
        self.write()
class DataReceivedRaisingClient(AbortingClient):
    """
    Call abortConnection(), and then throw exception, from dataReceived.
    """
    def dataReceived(self, data):
        self.transport.abortConnection()
        # The raised exception must not be masked by the abort.
        raise ZeroDivisionError("ONO")
class ResumeThrowsClient(ProducerAbortingClient):
    """
    Call abortConnection() and throw exception from resumeProducing().
    """
    def resumeProducing(self):
        if not self.inRegisterProducer:
            self.transport.abortConnection()
            raise ZeroDivisionError("ono!")
    def connectionLost(self, reason):
        # Base class assertion about stopProducing being called isn't valid;
        # if we blew up in resumeProducing, consumers are justified in
        # giving up on the producer and not calling stopProducing.
        ConnectableProtocol.connectionLost(self, reason)
class AbortConnectionMixin(object):
    """
    Unit tests for L{ITransport.abortConnection}.
    """
    # Override in subclasses, should be a EndpointCreator instance:
    endpoints = None
    def runAbortTest(self, clientClass, serverClass,
                     clientConnectionLostReason=None):
        """
        A test runner utility function, which hooks up a matched pair of client
        and server protocols.
        We then run the reactor until both sides have disconnected, and then
        verify that the right exception resulted.

        @param clientClass: Protocol class for the client side.
        @param serverClass: Protocol class for the server side.
        @param clientConnectionLostReason: Optional extra exception type the
            client is allowed (and expected) to disconnect with.
        """
        clientExpectedExceptions = (ConnectionAborted, ConnectionLost)
        serverExpectedExceptions = (ConnectionLost, ConnectionDone)
        # In TLS tests we may get SSL.Error instead of ConnectionLost,
        # since we're trashing the TLS protocol layer.
        if useSSL:
            clientExpectedExceptions = clientExpectedExceptions + (SSL.Error,)
            serverExpectedExceptions = serverExpectedExceptions + (SSL.Error,)
        client = clientClass()
        server = serverClass()
        # Give each side a reference to the other so protocols can poke the
        # peer's transport (e.g. StreamingProducerClient.connectionLost).
        client.otherProtocol = server
        server.otherProtocol = client
        reactor = runProtocolsWithReactor(self, server, client, self.endpoints)
        # Make sure everything was shutdown correctly:
        self.assertEqual(reactor.removeAll(), [])
        self.assertEqual(reactor.getDelayedCalls(), [])
        if clientConnectionLostReason is not None:
            self.assertIsInstance(
                client.disconnectReason.value,
                (clientConnectionLostReason,) + clientExpectedExceptions)
        else:
            self.assertIsInstance(client.disconnectReason.value,
                                  clientExpectedExceptions)
        self.assertIsInstance(server.disconnectReason.value, serverExpectedExceptions)
    def test_dataReceivedAbort(self):
        """
        abortConnection() is called in dataReceived. The protocol should be
        disconnected, but connectionLost should not be called re-entrantly.
        """
        return self.runAbortTest(AbortingClient, ReadAbortServerProtocol)
    def test_clientAbortsConnectionTwice(self):
        """
        abortConnection() is called twice by client.
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(AbortingTwiceClient, ReadAbortServerProtocol)
    def test_clientAbortsConnectionThenLosesConnection(self):
        """
        Client calls abortConnection(), followed by loseConnection().
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(AbortingThenLosingClient,
                                 ReadAbortServerProtocol)
    def test_serverAbortsConnectionTwice(self):
        """
        abortConnection() is called twice by server.
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(WritingButNotAbortingClient, ServerAbortsTwice,
                                 clientConnectionLostReason=ConnectionLost)
    def test_serverAbortsConnectionThenLosesConnection(self):
        """
        Server calls abortConnection(), followed by loseConnection().
        No exception should be thrown, and the connection will be closed.
        """
        return self.runAbortTest(WritingButNotAbortingClient,
                                 ServerAbortsThenLoses,
                                 clientConnectionLostReason=ConnectionLost)
    def test_resumeProducingAbort(self):
        """
        abortConnection() is called in resumeProducing, before any bytes have
        been exchanged. The protocol should be disconnected, but
        connectionLost should not be called re-entrantly.
        """
        self.runAbortTest(ProducerAbortingClient,
                          ConnectableProtocol)
    def test_resumeProducingAbortLater(self):
        """
        abortConnection() is called in resumeProducing, after some
        bytes have been exchanged. The protocol should be disconnected.
        """
        return self.runAbortTest(ProducerAbortingClientLater,
                                 AbortServerWritingProtocol)
    def test_fullWriteBuffer(self):
        """
        abortConnection() triggered by the write buffer being full.
        In particular, the server side stops reading. This is supposed
        to simulate a realistic timeout scenario where the client
        notices the server is no longer accepting data.
        The protocol should be disconnected, but connectionLost should not be
        called re-entrantly.
        """
        self.runAbortTest(StreamingProducerClient,
                          NoReadServer)
    def test_fullWriteBufferAfterByteExchange(self):
        """
        abortConnection() is triggered by a write buffer being full.
        However, this buffer is filled after some bytes have been exchanged,
        allowing a TLS handshake if we're testing TLS. The connection will
        then be lost.
        """
        return self.runAbortTest(StreamingProducerClientLater,
                                 EventualNoReadServer)
    def test_dataReceivedThrows(self):
        """
        dataReceived calls abortConnection(), and then raises an exception.
        The connection will be lost, with the thrown exception
        (C{ZeroDivisionError}) as the reason on the client. The idea here is
        that bugs should not be masked by abortConnection, in particular
        unexpected exceptions.
        """
        self.runAbortTest(DataReceivedRaisingClient,
                          AbortServerWritingProtocol,
                          clientConnectionLostReason=ZeroDivisionError)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)
    def test_resumeProducingThrows(self):
        """
        resumeProducing calls abortConnection(), and then raises an exception.
        The connection will be lost, with the thrown exception
        (C{ZeroDivisionError}) as the reason on the client. The idea here is
        that bugs should not be masked by abortConnection, in particular
        unexpected exceptions.
        """
        self.runAbortTest(ResumeThrowsClient,
                          ConnectableProtocol,
                          clientConnectionLostReason=ZeroDivisionError)
        errors = self.flushLoggedErrors(ZeroDivisionError)
        self.assertEqual(len(errors), 1)
class AbortConnectionTestCase(ReactorBuilder, AbortConnectionMixin):
    """
    TCP-specific L{AbortConnectionMixin} tests.
    """
    requiredInterfaces = (IReactorTCP,)
    # Plain TCP endpoints (no TLS wrapping).
    endpoints = TCPCreator()
# Generate per-reactor TestCase classes for the abortConnection suite.
globals().update(AbortConnectionTestCase.makeTestCaseClasses())
class SimpleUtilityTestCase(TestCase):
    """
    Simple, direct tests for helpers within L{twisted.internet.tcp}.
    """
    if ipv6Skip:
        skip = ipv6Skip
    def test_resolveNumericHost(self):
        """
        L{_resolveIPv6} raises a L{socket.gaierror} (L{socket.EAI_NONAME}) when
        invoked with a non-numeric host. (In other words, it is passing
        L{socket.AI_NUMERICHOST} to L{socket.getaddrinfo} and will not
        accidentally block if it receives bad input.)
        """
        err = self.assertRaises(socket.gaierror, _resolveIPv6, "localhost", 1)
        self.assertEqual(err.args[0], socket.EAI_NONAME)
    def test_resolveNumericService(self):
        """
        L{_resolveIPv6} raises a L{socket.gaierror} (L{socket.EAI_NONAME}) when
        invoked with a non-numeric port. (In other words, it is passing
        L{socket.AI_NUMERICSERV} to L{socket.getaddrinfo} and will not
        accidentally block if it receives bad input.)
        """
        err = self.assertRaises(socket.gaierror, _resolveIPv6, "::1", "http")
        self.assertEqual(err.args[0], socket.EAI_NONAME)
    if platform.isWindows():
        test_resolveNumericService.skip = ("The AI_NUMERICSERV flag is not "
                                           "supported by Microsoft providers.")
        # http://msdn.microsoft.com/en-us/library/windows/desktop/ms738520.aspx
    def test_resolveIPv6(self):
        """
        L{_resolveIPv6} discovers the flow info and scope ID of an IPv6
        address.
        """
        result = _resolveIPv6("::1", 2)
        self.assertEqual(len(result), 4)
        # We can't say anything more useful about these than that they're
        # integers, because the whole point of getaddrinfo is that you can
        # never know anything a priori about the network interfaces of the
        # computer that you're on and you have to ask it.
        self.assertIsInstance(result[2], int) # flow info
        self.assertIsInstance(result[3], int) # scope id
        # but, luckily, IP presentation format and what it means to be a port
        # number are a little better specified.
        self.assertEqual(result[:2], ("::1", 2))
| gpl-3.0 |
amith01994/intellij-community | python/lib/Lib/ConfigParser.py | 105 | 23116 | """Configuration file parser.
A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.
The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.
For example:
something: %(dir)s/whatever
would resolve the "%(dir)s" to the value of dir. All reference
expansions are done late, on demand.
Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.
class:
ConfigParser -- responsible for parsing a list of
configuration files, and managing the parsed database.
methods:
__init__(defaults=None)
create the parser and specify a dictionary of intrinsic defaults. The
keys must be strings, the values must be appropriate for %()s string
interpolation. Note that `__name__' is always an intrinsic default;
its value is the section's name.
sections()
return all the configuration section names, sans DEFAULT
has_section(section)
return whether the given section exists
has_option(section, option)
return whether the given option exists in the given section
options(section)
return list of configuration options for the named section
read(filenames)
read and parse the list of named configuration files, given by
name. A single filename is also allowed. Non-existing files
are ignored. Return list of successfully read files.
readfp(fp, filename=None)
read and parse one configuration file, given as a file object.
The filename defaults to fp.name; it is only used in error
messages (if fp has no `name' attribute, the string `<???>' is used).
get(section, option, raw=False, vars=None)
return a string value for the named option. All % interpolations are
expanded in the return values, based on the defaults passed into the
constructor and the DEFAULT section. Additional substitutions may be
provided using the `vars' argument, which must be a dictionary whose
contents override any pre-existing defaults.
getint(section, options)
like get(), but convert value to an integer
getfloat(section, options)
like get(), but convert value to a float
getboolean(section, options)
like get(), but convert value to a boolean (currently case
insensitively defined as 0, false, no, off for False, and 1, true,
yes, on for True). Returns False or True.
items(section, raw=False, vars=None)
return a list of tuples with (name, value) for each option
in the section.
remove_section(section)
remove the given file section and all its options
remove_option(section, option)
remove the given option from the given section
set(section, option, value)
set the given option
write(fp)
write the configuration state in .ini format
"""
import re
__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
"InterpolationError", "InterpolationDepthError",
"InterpolationSyntaxError", "ParsingError",
"MissingSectionHeaderError",
"ConfigParser", "SafeConfigParser", "RawConfigParser",
"DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]
DEFAULTSECT = "DEFAULT"
MAX_INTERPOLATION_DEPTH = 10
# exception classes
class Error(Exception):
    """Base class for ConfigParser exceptions."""
    def __init__(self, msg=''):
        # Kept as a plain attribute so subclasses (e.g. ParsingError) can
        # append detail to it after construction.
        self.message = msg
        Exception.__init__(self, msg)
    def __repr__(self):
        return self.message
    # str() and repr() both show just the message text.
    __str__ = __repr__
class NoSectionError(Error):
    """Raised when no section matches a requested option."""
    def __init__(self, section):
        Error.__init__(self, 'No section: %r' % (section,))
        # The missing section name, for programmatic access.
        self.section = section
class DuplicateSectionError(Error):
    """Raised when a section is multiply-created."""
    def __init__(self, section):
        Error.__init__(self, "Section %r already exists" % section)
        # The duplicated section name, for programmatic access.
        self.section = section
class NoOptionError(Error):
    """A requested option was not found."""
    def __init__(self, option, section):
        Error.__init__(self, "No option %r in section: %r" %
                       (option, section))
        # Missing option and the section it was sought in.
        self.option = option
        self.section = section
class InterpolationError(Error):
    """Base class for interpolation-related exceptions."""
    def __init__(self, option, section, msg):
        Error.__init__(self, msg)
        # Option/section being interpolated when the error occurred.
        self.option = option
        self.section = section
class InterpolationMissingOptionError(InterpolationError):
    """A string substitution required a setting which was not available."""
    def __init__(self, option, section, rawval, reference):
        msg = ("Bad value substitution:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\tkey    : %s\n"
               "\trawval : %s\n"
               % (section, option, reference, rawval))
        InterpolationError.__init__(self, option, section, msg)
        # Name of the %(...)s key that could not be resolved.
        self.reference = reference
class InterpolationSyntaxError(InterpolationError):
    """Raised when the source text into which substitutions are made
    does not conform to the required syntax."""
class InterpolationDepthError(InterpolationError):
    """Raised when substitutions are nested too deeply."""
    # Raised once interpolation recurses past MAX_INTERPOLATION_DEPTH.
    def __init__(self, option, section, rawval):
        msg = ("Value interpolation too deeply recursive:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\trawval : %s\n"
               % (section, option, rawval))
        InterpolationError.__init__(self, option, section, msg)
class ParsingError(Error):
    """Raised when a configuration file does not follow legal syntax."""
    def __init__(self, filename):
        Error.__init__(self, 'File contains parsing errors: %s' % filename)
        self.filename = filename
        # List of (lineno, line) pairs accumulated via append().
        self.errors = []
    def append(self, lineno, line):
        # Record the bogus line and extend the displayed message in step.
        self.errors.append((lineno, line))
        self.message += '\n\t[line %2d]: %s' % (lineno, line)
class MissingSectionHeaderError(ParsingError):
    """Raised when a key-value pair is found before any section header."""
    def __init__(self, filename, lineno, line):
        # Bypass ParsingError.__init__ to build a more specific message.
        Error.__init__(
            self,
            'File contains no section headers.\nfile: %s, line: %d\n%r' %
            (filename, lineno, line))
        self.filename = filename
        self.lineno = lineno
        self.line = line
class RawConfigParser:
    """
    Configuration file parser without %(...)s value interpolation.
    Sections map option names to raw string values; the [DEFAULT] section
    supplies fallback values for every other section.
    """
    def __init__(self, defaults=None):
        """
        Create a parser, optionally seeding the DEFAULT section.

        `defaults', if given, is a dict whose keys are passed through
        optionxform() before being stored.
        """
        self._sections = {}
        self._defaults = {}
        if defaults:
            for key, value in defaults.items():
                self._defaults[self.optionxform(key)] = value
    def defaults(self):
        # Returns the live dict of DEFAULT-section options (not a copy).
        return self._defaults
    def sections(self):
        """Return a list of section names, excluding [DEFAULT]"""
        # self._sections will never have [DEFAULT] in it
        return self._sections.keys()
    def add_section(self, section):
        """Create a new section in the configuration.
        Raise DuplicateSectionError if a section by the specified name
        already exists.
        """
        if section in self._sections:
            raise DuplicateSectionError(section)
        self._sections[section] = {}
    def has_section(self, section):
        """Indicate whether the named section is present in the configuration.
        The DEFAULT section is not acknowledged.
        """
        return section in self._sections
    def options(self, section):
        """Return a list of option names for the given section name."""
        try:
            # Copy so the DEFAULT options can be merged in without mutating
            # the stored section.
            opts = self._sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        opts.update(self._defaults)
        # '__name__' is internal bookkeeping (the section's own name).
        if '__name__' in opts:
            del opts['__name__']
        return opts.keys()
    def read(self, filenames):
        """Read and parse a filename or a list of filenames.
        Files that cannot be opened are silently ignored; this is
        designed so that you can specify a list of potential
        configuration file locations (e.g. current directory, user's
        home directory, systemwide directory), and all existing
        configuration files in the list will be read.  A single
        filename may also be given.
        Return list of successfully read files.
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]
        read_ok = []
        for filename in filenames:
            try:
                fp = open(filename)
            except IOError:
                # Missing/unreadable files are skipped by design (see above).
                continue
            self._read(fp, filename)
            fp.close()
            read_ok.append(filename)
        return read_ok
    def readfp(self, fp, filename=None):
        """Like read() but the argument must be a file-like object.
        The `fp' argument must have a `readline' method.  Optional
        second argument is the `filename', which if not given, is
        taken from fp.name.  If fp has no `name' attribute, `<???>' is
        used.
        """
        if filename is None:
            try:
                filename = fp.name
            except AttributeError:
                filename = '<???>'
        self._read(fp, filename)
    def get(self, section, option):
        """
        Return the raw string value of `option' in `section', falling back
        to the DEFAULT section.  Raise NoSectionError/NoOptionError.
        """
        opt = self.optionxform(option)
        if section not in self._sections:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            # Section is DEFAULT itself: look only at the defaults.
            if opt in self._defaults:
                return self._defaults[opt]
            else:
                raise NoOptionError(option, section)
        elif opt in self._sections[section]:
            return self._sections[section][opt]
        elif opt in self._defaults:
            return self._defaults[opt]
        else:
            raise NoOptionError(option, section)
    def items(self, section):
        """
        Return a list of (name, value) pairs for `section', with DEFAULT
        options merged in (section values win over defaults).
        """
        try:
            d2 = self._sections[section]
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            d2 = {}
        d = self._defaults.copy()
        d.update(d2)
        if "__name__" in d:
            del d["__name__"]
        return d.items()
    def _get(self, section, conv, option):
        # Fetch the raw value and pass it through the converter `conv'.
        return conv(self.get(section, option))
    def getint(self, section, option):
        """Like get(), but convert the value to an integer."""
        return self._get(section, int, option)
    def getfloat(self, section, option):
        """Like get(), but convert the value to a float."""
        return self._get(section, float, option)
    # Case-insensitive spellings accepted by getboolean().
    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                       '0': False, 'no': False, 'false': False, 'off': False}
    def getboolean(self, section, option):
        """Like get(), but convert the value to True/False via
        _boolean_states; raise ValueError for anything else."""
        v = self.get(section, option)
        if v.lower() not in self._boolean_states:
            raise ValueError, 'Not a boolean: %s' % v
        return self._boolean_states[v.lower()]
    def optionxform(self, optionstr):
        # Canonicalize option names; override to change case sensitivity.
        return optionstr.lower()
    def has_option(self, section, option):
        """Check for the existence of a given option in a given section."""
        if not section or section == DEFAULTSECT:
            option = self.optionxform(option)
            return option in self._defaults
        elif section not in self._sections:
            return False
        else:
            option = self.optionxform(option)
            return (option in self._sections[section]
                    or option in self._defaults)
    def set(self, section, option, value):
        """Set an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        sectdict[self.optionxform(option)] = value
    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        if self._defaults:
            fp.write("[%s]\n" % DEFAULTSECT)
            for (key, value) in self._defaults.items():
                # Embedded newlines become continuation lines (tab-indented).
                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for (key, value) in self._sections[section].items():
                if key != "__name__":
                    fp.write("%s = %s\n" %
                             (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
    def remove_option(self, section, option):
        """Remove an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        option = self.optionxform(option)
        existed = option in sectdict
        if existed:
            del sectdict[option]
        # Report whether anything was actually removed.
        return existed
    def remove_section(self, section):
        """Remove a file section."""
        existed = section in self._sections
        if existed:
            del self._sections[section]
        return existed
    #
    # Regular expressions for parsing section headers and options.
    #
    SECTCRE = re.compile(
        r'\['                                 # [
        r'(?P<header>[^]]+)'                  # very permissive!
        r'\]'                                 # ]
        )
    OPTCRE = re.compile(
        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
        r'\s*(?P<vi>[:=])\s*'                 # any number of space/tab,
                                              # followed by separator
                                              # (either : or =), followed
                                              # by any # space/tab
        r'(?P<value>.*)$'                     # everything up to eol
        )
    def _read(self, fp, fpname):
        """Parse a sectioned setup file.
        The sections in setup file contains a title line at the top,
        indicated by a name in square brackets (`[]'), plus key/value
        options lines, indicated by `name: value' format lines.
        Continuations are represented by an embedded newline then
        leading whitespace.  Blank lines, lines beginning with a '#',
        and just about everything else are ignored.
        """
        cursect = None                            # None, or a dictionary
        optname = None
        lineno = 0
        e = None                                  # None, or an exception
        while True:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            # comment or blank line?
            if line.strip() == '' or line[0] in '#;':
                continue
            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
                # no leading whitespace
                continue
            # continuation line?
            if line[0].isspace() and cursect is not None and optname:
                value = line.strip()
                if value:
                    cursect[optname] = "%s\n%s" % (cursect[optname], value)
            # a section header or option header?
            else:
                # is it a section header?
                mo = self.SECTCRE.match(line)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        cursect = self._sections[sectname]
                    elif sectname == DEFAULTSECT:
                        cursect = self._defaults
                    else:
                        cursect = {'__name__': sectname}
                        self._sections[sectname] = cursect
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self.OPTCRE.match(line)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        if vi in ('=', ':') and ';' in optval:
                            # ';' is a comment delimiter only if it follows
                            # a spacing character
                            pos = optval.find(';')
                            if pos != -1 and optval[pos-1].isspace():
                                optval = optval[:pos]
                        optval = optval.strip()
                        # allow empty values
                        if optval == '""':
                            optval = ''
                        optname = self.optionxform(optname.rstrip())
                        cursect[optname] = optval
                    else:
                        # a non-fatal parsing error occurred.  set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        if not e:
                            e = ParsingError(fpname)
                        e.append(lineno, repr(line))
        # if any parsing errors occurred, raise an exception
        if e:
            raise e
class ConfigParser(RawConfigParser):
    # Adds classic %(name)s interpolation on top of RawConfigParser's
    # raw option storage.  (Python 2 module; note the py2-only
    # "except KeyError, e" syntax below.)
    def get(self, section, option, raw=False, vars=None):
        """Get an option value for a given section.
        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.
        The section DEFAULT is special.
        """
        # Lookup precedence: DEFAULT values, overridden by the section's
        # own options, overridden by caller-supplied `vars`.
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            # DEFAULT is always a valid section even if never declared.
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            raise NoOptionError(option, section)
        if raw:
            return value
        else:
            return self._interpolate(section, option, value, d)
    def items(self, section, raw=False, vars=None):
        """Return a list of tuples with (name, value) for each option
        in the section.
        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true. Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.
        The section DEFAULT is special.
        """
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        options = d.keys()
        # '__name__' holds the section title for bookkeeping; it is not a
        # real option, so keep it out of the result.
        if "__name__" in options:
            options.remove("__name__")
        if raw:
            return [(option, d[option])
                    for option in options]
        else:
            return [(option, self._interpolate(section, option, d[option], d))
                    for option in options]
    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        value = rawval
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if "%(" in value:
                # Normalize referenced option names, then expand one level.
                value = self._KEYCRE.sub(self._interpolation_replace, value)
                try:
                    value = value % vars
                except KeyError, e:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e[0])
            else:
                break
        # A "%(" surviving MAX_INTERPOLATION_DEPTH passes indicates a
        # reference chain too deep (possibly circular) to resolve.
        if "%(" in value:
            raise InterpolationDepthError(option, section, rawval)
        return value
    # Matches a %(name)s reference, or (via the "|." alternative) any
    # single other character, so sub() can walk the whole string.
    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")
    def _interpolation_replace(self, match):
        s = match.group(1)
        if s is None:
            # Not a %(...)s reference; leave the character untouched.
            return match.group()
        else:
            # Re-emit the reference with the option name normalized.
            return "%%(%s)s" % self.optionxform(s)
class SafeConfigParser(ConfigParser):
    # Stricter interpolation: literal '%' must be written '%%', and any
    # malformed reference raises InterpolationSyntaxError instead of
    # passing through silently.
    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        L = []
        self._interpolate_some(option, L, rawval, section, vars, 1)
        return ''.join(L)
    _interpvar_match = re.compile(r"%\(([^)]+)\)s").match
    def _interpolate_some(self, option, accum, rest, section, map, depth):
        # Recursively expand `rest`, appending literal fragments and
        # resolved %(name)s values to the `accum` list.
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rest)
        while rest:
            p = rest.find("%")
            if p < 0:
                # No more '%' markers: the remainder is pure literal text.
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "%":
                # '%%' is an escaped literal percent sign.
                accum.append("%")
                rest = rest[2:]
            elif c == "(":
                m = self._interpvar_match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                var = self.optionxform(m.group(1))
                rest = rest[m.end():]
                try:
                    v = map[var]
                except KeyError:
                    raise InterpolationMissingOptionError(
                        option, section, rest, var)
                if "%" in v:
                    # The substituted value itself contains references;
                    # expand it recursively, bounding total depth.
                    self._interpolate_some(option, accum, v,
                                           section, map, depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'%%' must be followed by '%%' or '(', found: %r" % (rest,))
    def set(self, section, option, value):
        """Set an option.  Extend ConfigParser.set: check for string values."""
        # basestring is Python-2-only (covers both str and unicode).
        if not isinstance(value, basestring):
            raise TypeError("option values must be strings")
        ConfigParser.set(self, section, option, value)
| apache-2.0 |
wshallum/ansible | lib/ansible/modules/network/nxos/nxos_pim_interface.py | 13 | 31328 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_pim_interface
version_added: "2.2"
short_description: Manages PIM interface configuration.
description:
- Manages PIM interface configuration settings.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
notes:
- When C(state=default), supported params will be reset to a default state.
These include C(dr_prio), C(hello_auth_key), C(hello_interval), C(jp_policy_out),
C(jp_policy_in), C(jp_type_in), C(jp_type_out), C(border), C(neighbor_policy),
C(neighbor_type).
- The C(hello_auth_key) param is not idempotent.
- C(hello_auth_key) only supports clear text passwords.
- When C(state=absent), pim interface configuration will be set to defaults and pim-sm
will be disabled on the interface.
- PIM must be enabled on the device to use this module.
- This module is for Layer 3 interfaces.
options:
interface:
description:
- Full name of the interface such as Ethernet1/33.
required: true
sparse:
description:
- Enable/disable sparse-mode on the interface.
required: false
default: true
choices: ['true', 'false']
hello_auth_key:
description:
- Authentication for hellos on this interface.
required: false
default: null
hello_interval:
description:
- Hello interval in milliseconds for this interface.
required: false
default: null
choices: ['true', 'false']
jp_policy_out:
description:
- Policy for join-prune messages (outbound).
required: true
default: null
jp_policy_in:
description:
- Policy for join-prune messages (inbound).
required: false
default: null
jp_type_out:
description:
- Type of policy mapped to C(jp_policy_out).
required: false
default: null
choices: ['prefix', 'routemap']
jp_type_in:
description:
- Type of policy mapped to C(jp_policy_in).
required: false
default: null
choices: ['prefix', 'routemap']
border:
description:
- Configures interface to be a boundary of a PIM domain.
required: false
default: null
choices: ['true', 'false']
neighbor_policy:
description:
- Configures a neighbor policy for filtering adjacencies.
required: false
default: null
neighbor_type:
description:
- Type of policy mapped to neighbor_policy.
required: false
default: null
choices: ['prefix', 'routemap']
state:
description:
- Manages desired state of the resource.
required: false
default: present
choices: ['present', 'default']
'''
EXAMPLES = '''
# ensure PIM is not running on the interface
- nxos_pim_interface:
interface: eth1/33
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure the interface has pim-sm enabled with the appropriate priority and hello interval
- nxos_pim_interface:
interface: eth1/33
dr_prio: 10
hello_interval: 40
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure join-prune policies exist
- nxos_pim_interface:
interface: eth1/33
jp_policy_in: JPIN
jp_policy_out: JPOUT
jp_type_in: routemap
jp_type_out: routemap
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ensure defaults are in place
- nxos_pim_interface:
interface: eth1/33
state: default
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"interface": "eth1/33", "neighbor_policy": "test",
"neighbor_type": "routemap", "sparse": true}
existing:
description:
- k/v pairs of existing configuration
type: dict
sample: {"border": false, "dr_prio": "1", "hello_interval": "30000",
"isauth": false, "jp_bidir": false, "jp_policy_in": "JPIN",
"jp_policy_out": "1", "jp_type_in": "routemap",
"jp_type_out": null, "neighbor_policy": "test1",
"neighbor_type": "prefix", "sparse": true}
end_state:
description: k/v pairs of configuration after module execution
returned: always
type: dict
sample: {"border": false, "dr_prio": "1", "hello_interval": "30000",
"isauth": false, "jp_bidir": false, "jp_policy_in": "JPIN",
"jp_policy_out": "1", "jp_type_in": "routemap",
"jp_type_out": null, "neighbor_policy": "test",
"neighbor_type": "routemap", "sparse": true}
updates:
description: command sent to the device
returned: always
type: list
sample: ["interface eth1/33", "ip pim neighbor-policy test",
"ip pim neighbor-policy test"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
import time
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Coerce *val* into a list.

    None becomes an empty list, lists/tuples are copied into a new list,
    and any other value is wrapped as a single-element list.
    """
    if val is None:
        return []
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    # NetworkConfig extended with section expansion and hierarchy-aware
    # add(); used to build/diff candidate configurations for NX-OS.
    def expand_section(self, configobj, S=None):
        # Depth-first collection of `configobj` plus all its descendants.
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S
    def get_object(self, path):
        # Return the ConfigLine whose text AND full ancestry match `path`
        # exactly; None when no such line exists.
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item
    def to_block(self, section):
        # Render a list of ConfigLine objects back to raw config text.
        return '\n'.join([item.raw for item in section])
    def get_section(self, path):
        # Text of the section rooted at `path`; empty list when absent
        # (NOTE: mixed return type - str on success, list on failure).
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()
    def get_section_objects(self, path):
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)
    def add(self, lines, parents=None):
        """Adds one or lines of configuration
        """
        ancestors = list()
        offset = 0
        obj = None
        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)
        else:
            # Walk the parent chain, creating any missing levels with
            # indentation proportional to their depth.
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)
            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    # Not present yet: attach under the deepest parent.
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    # Prefer the legacy get_module() factory; the NameError fallback fires
    # when the conditional import at the top of the file did not provide
    # get_module (and would also catch a NameError raised inside it).
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    # Wrap the device's running config in a CustomNetworkConfig.  An
    # explicitly supplied 'config' module param wins; otherwise support
    # both the old (module.get_config) and new (module.config.get_config)
    # APIs.
    # NOTE(review): the include_defaults *argument* is never used; the
    # module param of the same name is read instead - confirm intended.
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    # Push the line-level difference between `candidate` and the device's
    # current configuration; optionally copy run->start when 'save' is set.
    config = get_config(module)
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]
    save_config = module.params['save']
    result = dict(changed=False)
    if commands:
        # In check mode the commands are reported but never sent.
        if not module.check_mode:
            try:
                module.configure(commands)
            except AttributeError:
                # Newer API spells it module.config(...)
                module.config(commands)
            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])
        result['changed'] = True
        result['updates'] = commands
    return result
# END OF COMMON CODE
def execute_config_command(commands, module):
    # Send configuration commands, supporting both the legacy
    # module.configure() API and the newer module.cli runner (which
    # needs an explicit leading 'configure' command).
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            # NOTE: mutates the caller's list - main() later pops the
            # inserted 'configure' before reporting the commands.
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module, text=False):
    """Normalize raw cli/ssh output into the body format used elsewhere.

    This is a workaround for cli/ssh transport: not every command supports
    "| json", so XML-looking output, a bare newline, or a '^' error marker
    is treated as "resource does not exist yet" and mapped to []. Raw text
    is passed through for 'show run' or when text=True; anything else is
    parsed as JSON (failing the module when that is impossible).
    """
    head = response[0]
    looks_like_junk = ('xml' in head, head == '\n', '^' in head)
    if any(looks_like_junk):
        body = []
    elif text or 'show run' in command:
        body = response
    else:
        try:
            body = [json.loads(head)]
        except ValueError:
            module.fail_json(msg='Command does not support JSON output',
                             command=command)
    return body
def execute_show(cmds, module, command_type=None):
    # Run show commands, supporting both the legacy module.execute() API
    # and the newer module.cli runner.
    # Maps NX-API command types onto the output formats the cli runner
    # understands.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }
    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        # execute() not available: fall back to module.cli.
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show', text=False):
    # Dispatch a show command according to the configured transport.
    # For ssh/cli, request JSON explicitly (except for raw text or
    # 'show run' output); nxapi returns structured data natively.
    # NOTE(review): `body` is unbound if transport is neither 'cli' nor
    # 'nxapi' - the argument spec elsewhere is assumed to prevent that.
    if module.params['transport'] == 'cli':
        if 'show run' not in command and text is False:
            command += ' | json'
        cmds = [command]
        response = execute_show(cmds, module)
        body = get_cli_body_ssh(command, response, module, text=text)
    elif module.params['transport'] == 'nxapi':
        cmds = [command]
        body = execute_show(cmds, module, command_type=command_type)
    return body
def flatten_list(command_lists):
    """Flatten a mixed sequence of items and sub-lists by one level."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def local_existing(gexisting):
    """Normalize the PIM interface state fetched from the device.

    Returns a ``(state, jp_bidir, isauth)`` tuple.  The ``jp_bidir`` and
    ``isauth`` helper flags are extracted from the dict; when both are
    truthy they are also removed from it.  Any non-empty state implies
    pim sparse-mode is enabled, so ``sparse`` is forced to True.
    """
    if not gexisting:
        return gexisting, False, False
    jp_bidir = gexisting.get('jp_bidir')
    isauth = gexisting.get('isauth')
    if jp_bidir and isauth:
        for helper_key in ('jp_bidir', 'isauth'):
            gexisting.pop(helper_key)
    gexisting['sparse'] = True
    return gexisting, jp_bidir, isauth
def get_interface_type(interface):
    """Classify an NX-OS interface name by its leading letters.

    Both 'MG' and 'MA' prefixes map to the management interface; any
    unrecognized prefix yields 'unknown'.
    """
    prefix_to_type = {
        'ET': 'ethernet',
        'VL': 'svi',
        'LO': 'loopback',
        'MG': 'management',
        'MA': 'management',
        'PO': 'portchannel',
    }
    return prefix_to_type.get(interface.upper()[:2], 'unknown')
def get_interface_mode(interface, intf_type, module):
    # Return 'layer2', 'layer3', or 'unknown' for the given interface.
    command = 'show interface {0}'.format(interface)
    mode = 'unknown'
    interface_table = {}
    body = execute_show_command(command, module)
    try:
        interface_table = body[0]['TABLE_interface']['ROW_interface']
    except (KeyError, AttributeError, IndexError):
        # Missing/unparsable output: report 'unknown' rather than fail.
        return mode
    if intf_type in ['ethernet', 'portchannel']:
        # eth_mode is 'access'/'trunk' for switchports and 'routed' for
        # L3 ports; default to layer3 when the key is absent.
        mode = str(interface_table.get('eth_mode', 'layer3'))
        if mode in ['access', 'trunk']:
            mode = 'layer2'
        elif mode == 'routed':
            mode = 'layer3'
    elif intf_type in ['loopback', 'svi']:
        # Loopbacks and SVIs are always routed interfaces.
        mode = 'layer3'
    return mode
def get_pim_interface(module, interface):
    # Collect the current PIM configuration of `interface` as a dict.
    # Combines structured 'show ip pim interface' output with a scrape of
    # 'show run interface' (needed to distinguish prefix-list vs
    # route-map policies, which the structured output does not expose).
    pim_interface = {}
    command = 'show ip pim interface {0}'.format(interface)
    # First probe in ASCII to detect 'PIM not running'; only then request
    # structured output.
    body = execute_show_command(command, module,
                                command_type='cli_show_ascii', text=True)
    if body:
        if 'not running' not in body[0]:
            body = execute_show_command(command, module)
    try:
        get_data = body[0]['TABLE_iod']['ROW_iod']
        # Some platforms return scalars, others single-element lists, for
        # these fields - hence the isinstance checks.  `unicode` is
        # Python-2-only.
        if isinstance(get_data.get('dr-priority'), unicode) or \
                isinstance(get_data.get('dr-priority'), str):
            pim_interface['dr_prio'] = get_data.get('dr-priority')
        else:
            pim_interface['dr_prio'] = get_data.get('dr-priority')[0]
        hello_interval = get_data.get('hello-interval-sec')
        if hello_interval:
            # Device reports seconds; the module works in milliseconds.
            hello_interval_msec = int(get_data.get('hello-interval-sec'))*1000
            pim_interface['hello_interval'] = str(hello_interval_msec)
        border = get_data.get('is-border')
        if border == 'true':
            pim_interface['border'] = True
        elif border == 'false':
            pim_interface['border'] = False
        isauth = get_data.get('isauth-config')
        if isauth == 'true':
            pim_interface['isauth'] = True
        elif isauth == 'false':
            pim_interface['isauth'] = False
        pim_interface['neighbor_policy'] = get_data.get('nbr-policy-name')
        if pim_interface['neighbor_policy'] == 'none configured':
            pim_interface['neighbor_policy'] = None
        jp_in_policy = get_data.get('jp-in-policy-name')
        pim_interface['jp_policy_in'] = jp_in_policy
        if jp_in_policy == 'none configured':
            pim_interface['jp_policy_in'] = None
        if isinstance(get_data.get('jp-out-policy-name'), unicode) or \
                isinstance(get_data.get('jp-out-policy-name'), str):
            pim_interface['jp_policy_out'] = get_data.get('jp-out-policy-name')
        else:
            pim_interface['jp_policy_out'] = get_data.get(
                'jp-out-policy-name')[0]
        if pim_interface['jp_policy_out'] == 'none configured':
            pim_interface['jp_policy_out'] = None
    except (KeyError, AttributeError, TypeError, IndexError):
        # No PIM data for this interface: report an empty state.
        return {}
    # Scrape the running config to learn each policy's type.
    command = 'show run interface {0}'.format(interface)
    body = execute_show_command(command, module, command_type='cli_show_ascii')
    jp_configs = []
    neigh = None
    if body:
        all_lines = body[0].splitlines()
        for each in all_lines:
            if 'jp-policy' in each:
                jp_configs.append(str(each.strip()))
            elif 'neighbor-policy' in each:
                neigh = str(each)
    pim_interface['neighbor_type'] = None
    neigh_type = None
    if neigh:
        if 'prefix-list' in neigh:
            neigh_type = 'prefix'
        else:
            neigh_type = 'routemap'
    pim_interface['neighbor_type'] = neigh_type
    len_existing = len(jp_configs)
    list_of_prefix_type = len([x for x in jp_configs if 'prefix-list' in x])
    jp_type_in = None
    jp_type_out = None
    jp_bidir = False
    if len_existing == 1:
        # determine type
        # A single jp-policy line ending in neither 'in' nor 'out'
        # applies to both directions (bidirectional policy).
        last_word = jp_configs[0].split(' ')[-1]
        if last_word == 'in':
            if list_of_prefix_type:
                jp_type_in = 'prefix'
            else:
                jp_type_in = 'routemap'
        elif last_word == 'out':
            if list_of_prefix_type:
                jp_type_out = 'prefix'
            else:
                jp_type_out = 'routemap'
        else:
            jp_bidir = True
            # NOTE(review): with a bidir prefix-list policy, jp_type_out
            # is set to 'routemap' while jp_type_in is 'prefix' - confirm
            # this asymmetry is intended.
            if list_of_prefix_type:
                jp_type_in = 'prefix'
                jp_type_out = 'routemap'
            else:
                jp_type_in = 'routemap'
                jp_type_out = 'routemap'
    else:
        for each in jp_configs:
            last_word = each.split(' ')[-1]
            if last_word == 'in':
                if 'prefix-list' in each:
                    jp_type_in = 'prefix'
                else:
                    jp_type_in = 'routemap'
            elif last_word == 'out':
                if 'prefix-list' in each:
                    jp_type_out = 'prefix'
                else:
                    jp_type_out = 'routemap'
    pim_interface['jp_type_in'] = jp_type_in
    pim_interface['jp_type_out'] = jp_type_out
    pim_interface['jp_bidir'] = jp_bidir
    return pim_interface
def fix_delta(delta, existing):
    """Drop a no-op 'sparse: False' entry from *delta* in place.

    Disabling sparse-mode is meaningless when the device never had it
    configured ('sparse' absent from *existing*), so the entry is removed
    to avoid emitting a pointless 'no' command.  Returns *delta*.
    """
    requested_off = delta.get('sparse') is False
    never_configured = existing.get('sparse') is None
    if requested_off and never_configured:
        del delta['sparse']
    return delta
def config_pim_interface(delta, existing, jp_bidir, isauth):
    # Translate a `delta` of desired-vs-existing PIM settings into the
    # list of NX-OS interface commands that realize it.
    command = None
    commands = []
    delta = fix_delta(delta, existing)
    # Command templates; the CMDS defaults assume prefix-list policies,
    # route-map variants are built inline below.
    CMDS = {
        'sparse': 'ip pim sparse-mode',
        'dr_prio': 'ip pim dr-priority {0}',
        'hello_interval': 'ip pim hello-interval {0}',
        'hello_auth_key': 'ip pim hello-authentication ah-md5 {0}',
        'border': 'ip pim border',
        'jp_policy_out': 'ip pim jp-policy prefix-list {0} out',
        'jp_policy_in': 'ip pim jp-policy prefix-list {0} in',
        'jp_type_in': '',
        'jp_type_out': '',
        'neighbor_policy': 'ip pim neighbor-policy prefix-list {0}',
        'neighbor_type': ''
    }
    if jp_bidir:
        # A bidirectional jp-policy must be removed before directional
        # ones can be configured.
        if delta.get('jp_policy_in') or delta.get('jp_policy_out'):
            if existing.get('jp_type_in') == 'prefix':
                command = 'no ip pim jp-policy prefix-list {0}'.format(
                    existing.get('jp_policy_in')
                )
            else:
                command = 'no ip pim jp-policy {0}'.format(
                    existing.get('jp_policy_in')
                )
            if command:
                commands.append(command)
    for k, v in delta.iteritems():
        if k in ['dr_prio', 'hello_interval', 'hello_auth_key', 'border',
                 'sparse']:
            if v:
                command = CMDS.get(k).format(v)
            elif k == 'hello_auth_key':
                # Authentication can only be negated when currently set.
                if isauth:
                    command = 'no ip pim hello-authentication ah-md5'
            else:
                # Falsy value: negate the plain command form.
                command = 'no ' + CMDS.get(k).format(v)
            if command:
                commands.append(command)
        elif k in ['neighbor_policy', 'jp_policy_in', 'jp_policy_out',
                   'neighbor_type']:
            # For each policy, the policy name and its type may arrive in
            # either delta or existing; delta wins when present.
            if k in ['neighbor_policy', 'neighbor_type']:
                temp = delta.get('neighbor_policy') or existing.get(
                    'neighbor_policy')
                if delta.get('neighbor_type') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif delta.get('neighbor_type') == 'routemap':
                    command = 'ip pim neighbor-policy {0}'.format(temp)
                elif existing.get('neighbor_type') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif existing.get('neighbor_type') == 'routemap':
                    command = 'ip pim neighbor-policy {0}'.format(temp)
            elif k in ['jp_policy_in', 'jp_type_in']:
                temp = delta.get('jp_policy_in') or existing.get(
                    'jp_policy_in')
                if delta.get('jp_type_in') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif delta.get('jp_type_in') == 'routemap':
                    command = 'ip pim jp-policy {0} in'.format(temp)
                elif existing.get('jp_type_in') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif existing.get('jp_type_in') == 'routemap':
                    command = 'ip pim jp-policy {0} in'.format(temp)
            elif k in ['jp_policy_out', 'jp_type_out']:
                temp = delta.get('jp_policy_out') or existing.get(
                    'jp_policy_out')
                if delta.get('jp_type_out') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif delta.get('jp_type_out') == 'routemap':
                    command = 'ip pim jp-policy {0} out'.format(temp)
                elif existing.get('jp_type_out') == 'prefix':
                    command = CMDS.get(k).format(temp)
                elif existing.get('jp_type_out') == 'routemap':
                    command = 'ip pim jp-policy {0} out'.format(temp)
            if command:
                commands.append(command)
        # Reset so a key that produced no command does not re-append the
        # previous one.
        command = None
    return commands
def get_pim_interface_defaults():
    """Return the factory-default PIM interface settings.

    Keys mirror the module's parameter names; values are the NX-OS
    defaults (dr-priority 1, no border, 30s hello interval expressed in
    milliseconds, no hello authentication).
    """
    args = dict(dr_prio='1', border=False,
                hello_interval='30000',
                hello_auth_key=False)
    # Fix: dict.iteritems() is Python-2-only; .items() behaves identically
    # here and also works on Python 3.  The is-not-None filter is kept as a
    # guard against future defaults being added as None.
    default = dict((param, value) for (param, value) in args.items()
                   if value is not None)
    return default
def default_pim_interface_policies(existing, jp_bidir):
    # Build the 'no ...' commands that remove any configured jp-policy and
    # neighbor-policy from the interface.
    commands = []
    if jp_bidir:
        if existing.get('jp_policy_in') or existing.get('jp_policy_out'):
            if existing.get('jp_type_in') == 'prefix':
                command = 'no ip pim jp-policy prefix-list {0}'.format(
                    existing.get('jp_policy_in')
                )
        # NOTE(review): `command` is unbound here when a bidir policy
        # exists but jp_type_in is not 'prefix' (route-map bidir case) -
        # confirm whether that path can occur / needs an else branch.
        if command:
            commands.append(command)
    elif not jp_bidir:
        command = None
        # Iterate the existing state and negate each directional policy
        # with the form matching its type (prefix-list vs route-map).
        for k, v in existing.iteritems():
            if k == 'jp_policy_in':
                if existing.get('jp_policy_in'):
                    if existing.get('jp_type_in') == 'prefix':
                        command = 'no ip pim jp-policy prefix-list {0} in'.format(
                            existing.get('jp_policy_in')
                        )
                    else:
                        command = 'no ip pim jp-policy {0} in'.format(
                            existing.get('jp_policy_in')
                        )
            elif k == 'jp_policy_out':
                if existing.get('jp_policy_out'):
                    if existing.get('jp_type_out') == 'prefix':
                        command = 'no ip pim jp-policy prefix-list {0} out'.format(
                            existing.get('jp_policy_out')
                        )
                    else:
                        command = 'no ip pim jp-policy {0} out'.format(
                            existing.get('jp_policy_out')
                        )
            if command:
                commands.append(command)
            # Reset so unrelated keys do not re-append the last command.
            command = None
    if existing.get('neighbor_policy'):
        command = 'no ip pim neighbor-policy'
        commands.append(command)
    return commands
def config_pim_interface_defaults(existing, jp_bidir, isauth):
    # Produce the commands that return the interface's PIM settings to
    # their defaults and strip all jp/neighbor policies.
    command = []
    # returns a dict
    defaults = get_pim_interface_defaults()
    # Only settings that differ from the defaults need commands.
    # (iteritems() is Python-2-only.)
    delta = dict(set(defaults.iteritems()).difference(
        existing.iteritems()))
    if delta:
        # returns a list
        command = config_pim_interface(delta, existing,
                                       jp_bidir, isauth)
    comm = default_pim_interface_policies(existing, jp_bidir)
    if comm:
        for each in comm:
            command.append(each)
    return command
def main():
    # Module entry point: validate parameters, compute the delta between
    # the proposed and existing PIM interface state, push the resulting
    # commands, and report proposed/existing/end_state facts.
    argument_spec=dict(
        interface=dict(required=True),
        sparse=dict(type='bool', default=True),
        dr_prio=dict(type='str'),
        hello_auth_key=dict(type='str'),
        hello_interval=dict(type='int'),
        jp_policy_out=dict(type='str'),
        jp_policy_in=dict(type='str'),
        jp_type_out=dict(choices=['prefix', 'routemap']),
        jp_type_in=dict(choices=['prefix', 'routemap']),
        border=dict(type='bool'),
        neighbor_policy=dict(type='str'),
        neighbor_type=dict(choices=['prefix', 'routemap']),
        state=dict(choices=['present', 'absent', 'default'],
                       default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)
    state = module.params['state']
    sparse = module.params['sparse']
    interface = module.params['interface']
    jp_type_in = module.params['jp_type_in']
    jp_type_out = module.params['jp_type_out']
    jp_policy_in = module.params['jp_policy_in']
    jp_policy_out = module.params['jp_policy_out']
    neighbor_policy = module.params['neighbor_policy']
    neighbor_type = module.params['neighbor_type']
    hello_interval = module.params['hello_interval']
    intf_type = get_interface_type(interface)
    # PIM configuration only applies to routed (Layer 3) interfaces.
    if get_interface_mode(interface, intf_type, module) == 'layer2':
        module.fail_json(msg='this module only works on Layer 3 interfaces.')
    # Each policy parameter requires its companion type parameter so the
    # right command form (prefix-list vs route-map) can be generated.
    if jp_policy_in:
        if not jp_type_in:
            module.fail_json(msg='jp_type_in required when using jp_policy_in.')
    if jp_policy_out:
        if not jp_type_out:
            module.fail_json(msg='jp_type_out required when using '
                                 ' jp_policy_out.')
    if neighbor_policy:
        if not neighbor_type:
            module.fail_json(msg='neighbor_type required when using '
                                 'neighbor_policy.')
    get_existing = get_pim_interface(module, interface)
    existing, jp_bidir, isauth = local_existing(get_existing)
    end_state = existing
    changed = False
    commands = []
    # Whitelist of params that participate in the proposed/delta diff.
    args = [
        'interface',
        'sparse',
        'dr_prio',
        'hello_auth_key',
        'hello_interval',
        'jp_policy_out',
        'jp_type_out',
        'jp_type_in',
        'jp_policy_in',
        'border',
        'neighbor_type',
        'neighbor_policy'
    ]
    proposed = dict((k, v) for k, v in module.params.iteritems()
                    if v is not None and k in args)
    '''
    CANNOT_ABSENT = ['dr_prio', 'hello_interval',
                     'hello_auth_key', 'jp_policy_out', 'jp_policy_in',
                     'jp_type_out', 'jp_type_in', 'border', 'neighbor_type',
                     'neighbor_policy']
    '''
    # The user supplies seconds; the device state is kept in milliseconds.
    if hello_interval:
        proposed['hello_interval'] = str(proposed['hello_interval'] * 1000)
    delta = dict(set(proposed.iteritems()).difference(existing.iteritems()))
    if state == 'present':
        if delta:
            command = config_pim_interface(delta, existing, jp_bidir, isauth)
            if command:
                commands.append(command)
    elif state == 'default':
        defaults = config_pim_interface_defaults(existing, jp_bidir, isauth)
        if defaults:
            commands.append(defaults)
    elif state == 'absent':
        # 'absent' resets everything to defaults AND disables sparse-mode.
        if existing.get('sparse') == True:
            delta['sparse'] = False
            # defaults is a list of commands
            defaults = config_pim_interface_defaults(existing, jp_bidir, isauth)
            if defaults:
                commands.append(defaults)
            command = config_pim_interface(delta, existing, jp_bidir, isauth)
            commands.append(command)
    if commands:
        # All commands run in the context of the target interface.
        commands.insert(0, ['interface {0}'.format(interface)])
    cmds = flatten_list(commands)
    results = {}
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            # Give the device a moment before re-reading state.
            time.sleep(1)
            get_existing = get_pim_interface(module, interface)
            end_state, jp_bidir, isauth = local_existing(get_existing)
            # Strip the 'configure' command the cli fallback may have
            # prepended so the report shows only the real updates.
            if 'configure' in cmds:
                cmds.pop(0)
    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['end_state'] = end_state
    module.exit_json(**results)
| gpl-3.0 |
botswana-harvard/tshilo-dikotla | td_lab/models/order_item.py | 2 | 1371 | from django.db import models
from django.utils import timezone
from edc_base.model.models import BaseUuidModel
from edc_export.models import ExportTrackingFieldsMixin
from edc_sync.models import SyncModelMixin, SyncHistoricalRecords
from ..managers import OrderItemManager
from .aliquot import Aliquot
from .order import Order
from .panel import Panel
class OrderItem(SyncModelMixin, ExportTrackingFieldsMixin, BaseUuidModel):
    """A single line item of a lab Order: one aliquot ordered for one panel."""
    order = models.ForeignKey(Order)
    aliquot = models.ForeignKey(Aliquot)
    panel = models.ForeignKey(
        Panel,
        null=True,
        blank=False,
    )
    # Business identifier; also serves as the natural key for
    # serialization/sync (see natural_key below).
    order_identifier = models.CharField(
        max_length=25,
        null=True,
        help_text='',
    )
    order_datetime = models.DateTimeField(
        default=timezone.now
    )
    subject_identifier = models.CharField(
        max_length=50,
        null=True,
        help_text="non-user helper field to simplify search and filtering")
    objects = OrderItemManager()
    history = SyncHistoricalRecords()
    def save(self, *args, **kwargs):
        # Denormalize the subject identifier from the aliquot's receive
        # record so list filtering/search does not require joins.
        self.subject_identifier = self.aliquot.receive.registered_subject.subject_identifier
        super(OrderItem, self).save(*args, **kwargs)
    def natural_key(self):
        return (self.order_identifier, )
    class Meta:
        app_label = 'td_lab'
        ordering = ['-order_datetime', ]
| gpl-2.0 |
jnhagelberg/incubator-atlas | distro/src/bin/atlas_kafka_setup_hook.py | 2 | 1491 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import atlas_client_cmdline as cmdline
import atlas_config as mc
def main():
    """Create the Atlas notification topics in Kafka.

    Resolves the configuration directory, JVM options, and hook classpath,
    then launches the Java AtlasTopicCreator and returns its exit status.
    """
    conf_dir = cmdline.setup_conf_dir()
    log_jvm_opts = cmdline.setup_jvm_opts_list(conf_dir, 'atlas_kafka_setup_hook.log')
    hook_classpath = cmdline.get_atlas_hook_classpath(conf_dir)
    topics = mc.get_topics_to_create(conf_dir)
    creator = mc.java("org.apache.atlas.hook.AtlasTopicCreator", topics,
                      hook_classpath, log_jvm_opts)
    return creator.wait()
if __name__ == '__main__':
    try:
        returncode = main()
    except Exception as e:
        # Best-effort: report the failure and exit non-zero instead of
        # letting the traceback escape.  (Python 2 print statement.)
        print "Exception in setting up Kafka topics for Atlas: %s" % str(e)
        returncode = -1
    sys.exit(returncode)
agrimk/merlin | venv/lib/python2.7/site-packages/setuptools/command/install_scripts.py | 505 | 2231 | from distutils import log
import distutils.command.install_scripts as orig
import os
from pkg_resources import Distribution, PathMetadata, ensure_directory
class install_scripts(orig.install_scripts):
    """Do normal script install, plus any egg_info wrapper scripts"""
    def initialize_options(self):
        orig.install_scripts.initialize_options(self)
        # When true, entry-point wrapper scripts are skipped (used when
        # scripts will live inside a zipped .egg instead).
        self.no_ep = False
    def run(self):
        # Imported lazily to avoid a circular import at module load time.
        import setuptools.command.easy_install as ei
        self.run_command("egg_info")
        if self.distribution.scripts:
            orig.install_scripts.run(self)  # run first to set up self.outfiles
        else:
            self.outfiles = []
        if self.no_ep:
            # don't install entry point scripts into .egg file!
            return
        ei_cmd = self.get_finalized_command("egg_info")
        # Describe the project being installed so the script writer can
        # resolve its console/gui entry points.
        dist = Distribution(
            ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
            ei_cmd.egg_name, ei_cmd.egg_version,
        )
        bs_cmd = self.get_finalized_command('build_scripts')
        exec_param = getattr(bs_cmd, 'executable', None)
        bw_cmd = self.get_finalized_command("bdist_wininst")
        is_wininst = getattr(bw_cmd, '_is_running', False)
        writer = ei.ScriptWriter
        if is_wininst:
            # Windows installers always launch scripts via python.exe and
            # need .exe wrapper generation.
            exec_param = "python.exe"
            writer = ei.WindowsScriptWriter
        # resolve the writer to the environment
        writer = writer.best()
        cmd = writer.command_spec_class.best().from_param(exec_param)
        for args in writer.get_args(dist, cmd.as_header()):
            self.write_script(*args)
    def write_script(self, script_name, contents, mode="t", *ignored):
        """Write an executable file to the scripts directory"""
        from setuptools.command.easy_install import chmod, current_umask
        log.info("Installing %s script to %s", script_name, self.install_dir)
        target = os.path.join(self.install_dir, script_name)
        self.outfiles.append(target)
        mask = current_umask()
        if not self.dry_run:
            ensure_directory(target)
            f = open(target, "w" + mode)
            f.write(contents)
            f.close()
            # Make executable for everyone the umask allows.
            chmod(target, 0o777 - mask)
vongochung/buiquocviet | django/db/models/aggregates.py | 521 | 2101 | """
Classes to represent the definitions of aggregate functions.
"""
class Aggregate(object):
    """
    Base class for aggregate function definitions (Avg, Count, Sum, ...).
    """
    def __init__(self, lookup, **extra):
        """Create an aggregate over *lookup*.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
          aggregate definition

        Subclasses supply the class attribute ``name``, the identifier
        for this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup "price" aggregated with Avg -> alias "price__avg".
        return '%s__%s' % (self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Convert this generic definition into a backend-specific
        aggregate and register it on *query* under *alias*.

        * col is a column reference describing the subject field of the
          aggregate: either an alias, or a (table, column) tuple.
        * source is the underlying field or aggregate definition; when
          the aggregate is not an ordinal or computed type, it is used
          to determine the coerced output type.
        * is_summary is True when the aggregate is a summary value
          rather than an annotation.
        """
        backend_class = getattr(query.aggregates_module, self.name)
        query.aggregates[alias] = backend_class(
            col, source=source, is_summary=is_summary, **self.extra
        )
# Concrete aggregate definitions. Each subclass only supplies the `name`
# identifier that add_to_query() uses to resolve the backend implementation.
class Avg(Aggregate):
    name = 'Avg'
class Count(Aggregate):
    name = 'Count'
class Max(Aggregate):
    name = 'Max'
class Min(Aggregate):
    name = 'Min'
class StdDev(Aggregate):
    name = 'StdDev'
class Sum(Aggregate):
    name = 'Sum'
class Variance(Aggregate):
    name = 'Variance'
| bsd-3-clause |
protocol7/python-koans | python 3/koans/about_strings.py | 7 | 5579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutStrings(Koan):
    """Koan exercises about Python strings.

    Each ``__`` placeholder is intentionally blank: the student replaces
    it with the value that makes the assertion pass. Do not "fix" the
    blanks — they are the exercise.
    """
    def test_double_quoted_strings_are_strings(self):
        string = "Hello, world."
        self.assertEqual(__, isinstance(string, str))
    def test_single_quoted_strings_are_also_strings(self):
        string = 'Goodbye, world.'
        self.assertEqual(__, isinstance(string, str))
    def test_triple_quote_strings_are_also_strings(self):
        string = """Howdy, world!"""
        self.assertEqual(__, isinstance(string, str))
    def test_triple_single_quotes_work_too(self):
        string = '''Bonjour tout le monde!'''
        self.assertEqual(__, isinstance(string, str))
    def test_raw_strings_are_also_strings(self):
        string = r"Konnichi wa, world!"
        self.assertEqual(__, isinstance(string, str))
    def test_use_single_quotes_to_create_string_with_double_quotes(self):
        string = 'He said, "Go Away."'
        self.assertEqual(__, string)
    def test_use_double_quotes_to_create_strings_with_single_quotes(self):
        string = "Don't"
        self.assertEqual(__, string)
    def test_use_backslash_for_escaping_quotes_in_strings(self):
        a = "He said, \"Don't\""
        b = 'He said, "Don\'t"'
        self.assertEqual(__, (a == b))
    def test_use_backslash_at_the_end_of_a_line_to_continue_onto_the_next_line(self):
        string = "It was the best of times,\n\
It was the worst of times."
        self.assertEqual(__, len(string))
    def test_triple_quoted_strings_can_span_lines(self):
        string = """
Howdy,
world!
"""
        self.assertEqual(__, len(string))
    def test_triple_quoted_strings_need_less_escaping(self):
        a = "Hello \"world\"."
        b = """Hello "world"."""
        self.assertEqual(__, (a == b))
    def but_you_still_have_to_be_careful_at_the_end_of_a_triple_quoted_string(self):
        string = """Hello "world\""""
    def test_plus_concatenates_strings(self):
        string = "Hello, " + "world"
        self.assertEqual(__, string)
    def test_adjacent_strings_are_concatenated_automatically(self):
        string = "Hello" ", " "World"
        self.assertEqual(__, string)
    def test_plus_will_not_modify_original_strings(self):
        hi = "Hello, "
        there = "world"
        string = hi + there
        self.assertEqual(__, hi)
        self.assertEqual(__, there)
    def test_plus_equals_will_append_to_end_of_string(self):
        hi = "Hello, "
        there = "world"
        hi += there
        self.assertEqual(__, hi)
    def test_plus_equals_also_leaves_original_string_unmodified(self):
        original = "Hello, "
        hi = original
        there = "world"
        hi += there
        self.assertEqual(__, original)
    def test_most_strings_interpret_escape_characters(self):
        string = "\n"
        self.assertEqual('\n', string)
        self.assertEqual("""\n""", string)
        self.assertEqual(__, len(string))
    def test_use_format_to_interpolate_variables(self):
        value1 = 'one'
        value2 = 2
        string = "The values are {0} and {1}".format(value1, value2)
        self.assertEqual(__, string)
    def test_formatted_values_con_be_shown_in_any_order_or_be_repeated(self):
        value1 = 'doh'
        value2 = 'DOH'
        string = "The values are {1}, {0}, {0} and {1}!".format(value1, value2)
        self.assertEqual(__, string)
    def test_any_python_expression_may_be_interpolated(self):
        import math # import a standard python module with math functions
        decimal_places = 4
        string = "The square root of 5 is {0:.{1}f}".format(math.sqrt(5), \
            decimal_places)
        self.assertEqual(__, string)
    def test_you_can_get_a_substring_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual(__, string[7:10])
    def test_you_can_get_a_single_character_from_a_string(self):
        string = "Bacon, lettuce and tomato"
        self.assertEqual(__, string[1])
    def test_single_characters_can_be_represented_by_integers(self):
        self.assertEqual(__, ord('a'))
        self.assertEqual(__, ord('b') == (ord('a') + 1))
    def test_strings_can_be_split(self):
        string = "Sausage Egg Cheese"
        words = string.split()
        self.assertListEqual([__, __, __], words)
    def test_strings_can_be_split_with_different_patterns(self):
        import re #import python regular expression library
        string = "the,rain;in,spain"
        pattern = re.compile(',|;')
        words = pattern.split(string)
        self.assertListEqual([__, __, __, __], words)
        # Pattern is a Python regular expression pattern which matches ',' or ';'
    def test_raw_strings_do_not_interpret_escape_characters(self):
        string = r'\n'
        self.assertNotEqual('\n', string)
        self.assertEqual(__, string)
        self.assertEqual(__, len(string))
        # Useful in regular expressions, file paths, URLs, etc.
    def test_strings_can_be_joined(self):
        words = ["Now", "is", "the", "time"]
        self.assertEqual(__, ' '.join(words))
    def test_strings_can_change_case(self):
        self.assertEqual(__, 'guido'.capitalize())
        self.assertEqual(__, 'guido'.upper())
        self.assertEqual(__, 'TimBot'.lower())
        self.assertEqual(__, 'guido van rossum'.title())
        self.assertEqual(__, 'ToTaLlY aWeSoMe'.swapcase())
| mit |
KyleJamesWalker/ansible | lib/ansible/modules/cloud/amazon/ec2_vpc_nat_gateway.py | 33 | 34892 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ec2_vpc_nat_gateway
short_description: Manage AWS VPC NAT Gateways.
description:
- Ensure the state of AWS VPC NAT Gateways based on their id, allocation and subnet ids.
version_added: "2.2"
requirements: [boto3, botocore]
options:
state:
description:
- Ensure NAT Gateway is present or absent.
required: false
default: "present"
choices: ["present", "absent"]
nat_gateway_id:
description:
- The id AWS dynamically allocates to the NAT Gateway on creation.
This is required when the absent option is present.
required: false
default: None
subnet_id:
description:
- The id of the subnet to create the NAT Gateway in. This is required
with the present option.
required: false
default: None
allocation_id:
description:
- The id of the elastic IP allocation. If this is not passed and the
eip_address is not passed. An EIP is generated for this NAT Gateway.
required: false
default: None
eip_address:
description:
- The elastic IP address of the EIP you want attached to this NAT Gateway.
If this is not passed and the allocation_id is not passed,
an EIP is generated for this NAT Gateway.
required: false
if_exist_do_not_create:
description:
- if a NAT Gateway exists already in the subnet_id, then do not create a new one.
required: false
default: false
release_eip:
description:
- Deallocate the EIP from the VPC.
- Option is only valid with the absent state.
- You should use this with the wait option. Since you can not release an address while a delete operation is happening.
required: false
default: true
wait:
description:
- Wait for operation to complete before returning.
required: false
default: false
wait_timeout:
description:
- How many seconds to wait for an operation to complete before timing out.
required: false
default: 300
client_token:
description:
- Optional unique token to be used during create to ensure idempotency.
When specifying this option, ensure you specify the eip_address parameter
as well otherwise any subsequent runs will fail.
required: false
author:
- "Allen Sanabria (@linuxdynasty)"
- "Jon Hadfield (@jonhadfield)"
- "Karen Cheng(@Etherdaemon)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Create new nat gateway with client token.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
region: ap-southeast-2
client_token: abcd-12345678
register: new_nat_gateway
- name: Create new nat gateway using an allocation-id.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
allocation_id: eipalloc-12345678
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway, using an EIP address and wait for available status.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
eip_address: 52.1.1.1
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
register: new_nat_gateway
- name: Create new nat gateway and allocate new EIP if a nat gateway does not yet exist in the subnet.
ec2_vpc_nat_gateway:
state: present
subnet_id: subnet-12345678
wait: yes
region: ap-southeast-2
if_exist_do_not_create: true
register: new_nat_gateway
- name: Delete nat gateway using discovered nat gateways from facts module.
ec2_vpc_nat_gateway:
state: absent
region: ap-southeast-2
wait: yes
nat_gateway_id: "{{ item.NatGatewayId }}"
release_eip: yes
register: delete_nat_gateway_result
with_items: "{{ gateways_to_remove.result }}"
- name: Delete nat gateway and wait for deleted status.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
wait: yes
wait_timeout: 500
region: ap-southeast-2
- name: Delete nat gateway and release EIP.
ec2_vpc_nat_gateway:
state: absent
nat_gateway_id: nat-12345678
release_eip: yes
wait: yes
wait_timeout: 300
region: ap-southeast-2
'''
RETURN = '''
create_time:
description: The ISO 8601 date time formatin UTC.
returned: In all cases.
type: string
sample: "2016-03-05T05:19:20.282000+00:00'"
nat_gateway_id:
description: id of the VPC NAT Gateway
returned: In all cases.
type: string
sample: "nat-0d1e3a878585988f8"
subnet_id:
description: id of the Subnet
returned: In all cases.
type: string
sample: "subnet-12345"
state:
description: The current state of the NAT Gateway.
returned: In all cases.
type: string
sample: "available"
vpc_id:
description: id of the VPC.
returned: In all cases.
type: string
sample: "vpc-12345"
nat_gateway_addresses:
description: List of dictionairies containing the public_ip, network_interface_id, private_ip, and allocation_id.
returned: In all cases.
type: string
sample: [
{
'public_ip': '52.52.52.52',
'network_interface_id': 'eni-12345',
'private_ip': '10.0.0.100',
'allocation_id': 'eipalloc-12345'
}
]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import ec2_argument_spec, get_aws_connection_info, boto3_conn
import datetime
import random
import re
import time
from dateutil.tz import tzutc
try:
import botocore
import boto3
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
# Canned data used when running in check mode ("dry run"): no AWS calls are
# made; these stand in for the API results instead. The first constant is in
# this module's snake_case form, the *_UNCONVERTED ones mimic raw boto3 output.
DRY_RUN_GATEWAYS = [
    {
        "nat_gateway_id": "nat-123456789",
        "subnet_id": "subnet-123456789",
        "nat_gateway_addresses": [
            {
                "public_ip": "55.55.55.55",
                "network_interface_id": "eni-1234567",
                "private_ip": "10.0.0.102",
                "allocation_id": "eipalloc-1234567"
            }
        ],
        "state": "available",
        "create_time": "2016-03-05T05:19:20.282000+00:00",
        "vpc_id": "vpc-12345678"
    }
]
DRY_RUN_GATEWAY_UNCONVERTED = [
    {
        'VpcId': 'vpc-12345678',
        'State': 'available',
        'NatGatewayId': 'nat-123456789',
        'SubnetId': 'subnet-123456789',
        'NatGatewayAddresses': [
            {
                'PublicIp': '55.55.55.55',
                'NetworkInterfaceId': 'eni-1234567',
                'AllocationId': 'eipalloc-1234567',
                'PrivateIp': '10.0.0.102'
            }
        ],
        'CreateTime': datetime.datetime(2016, 3, 5, 5, 19, 20, 282000, tzinfo=tzutc())
    }
]
DRY_RUN_ALLOCATION_UNCONVERTED = {
    'Addresses': [
        {
            'PublicIp': '55.55.55.55',
            'Domain': 'vpc',
            'AllocationId': 'eipalloc-1234567'
        }
    ]
}
# Prefix for messages produced while in check mode.
DRY_RUN_MSGS = 'DryRun Mode:'
def convert_to_lower(data):
    """Recursively convert CamelCase dict keys to snake_case.

    Args:
        data (dict): Mapping whose keys use boto3-style CamelCase,
            e.g. 'NatGatewayId' becomes 'nat_gateway_id'.

    datetime.datetime values are converted to ISO 8601 strings. Nested
    dicts are converted recursively; list elements are converted when
    they are dicts and passed through unchanged otherwise.

    Basic Usage:
        >>> convert_to_lower({'FooBar': []})
        {'foo_bar': []}

    Returns:
        dict: Converted mapping. Non-dict input yields an empty dict.
    """
    results = dict()
    if isinstance(data, dict):
        for key, val in data.items():
            # Prefix every run of 1-3 capitals with '_', then lowercase.
            key = re.sub(r'(([A-Z]{1,3}){1})', r'_\1', key).lower()
            if key[0] == '_':
                key = key[1:]
            if isinstance(val, datetime.datetime):
                results[key] = val.isoformat()
            elif isinstance(val, dict):
                results[key] = convert_to_lower(val)
            elif isinstance(val, list):
                # BUGFIX: only recurse into dict elements; the original
                # replaced scalar list elements with empty dicts.
                results[key] = [
                    convert_to_lower(item) if isinstance(item, dict) else item
                    for item in val
                ]
            else:
                results[key] = val
    return results
def get_nat_gateways(client, subnet_id=None, nat_gateway_id=None,
                     states=None, check_mode=False):
    """Retrieve NAT gateways, either by id or by subnet and state.

    Args:
        client (botocore.client.EC2): Boto3 client.

    Kwargs:
        subnet_id (str): Subnet to search; only used when nat_gateway_id
            is not given.
        nat_gateway_id (str): Look up this specific gateway id instead.
        states (list): Gateway states to match (pending, failed,
            available, deleting, deleted).
            Defaults to ['available', 'pending'].
        check_mode (bool): When True, no AWS call is made; canned
            DRY_RUN_GATEWAYS data is returned instead.

    Returns:
        Tuple (bool, str, list): (retrieval succeeded, error message,
        list of gateway dicts with snake_case keys).
    """
    params = dict()
    err_msg = ""
    gateways_retrieved = False
    existing_gateways = list()
    if not states:
        states = ['available', 'pending']
    if nat_gateway_id:
        params['NatGatewayIds'] = [nat_gateway_id]
    else:
        # No explicit id: filter by subnet and state instead.
        params['Filter'] = [
            {
                'Name': 'subnet-id',
                'Values': [subnet_id]
            },
            {
                'Name': 'state',
                'Values': states
            }
        ]
    try:
        if not check_mode:
            gateways = client.describe_nat_gateways(**params)['NatGateways']
            if gateways:
                for gw in gateways:
                    existing_gateways.append(convert_to_lower(gw))
            gateways_retrieved = True
        else:
            # Check mode: serve the canned dry-run gateway when it matches.
            gateways_retrieved = True
            if nat_gateway_id:
                if DRY_RUN_GATEWAYS[0]['nat_gateway_id'] == nat_gateway_id:
                    existing_gateways = DRY_RUN_GATEWAYS
            elif subnet_id:
                if DRY_RUN_GATEWAYS[0]['subnet_id'] == subnet_id:
                    existing_gateways = DRY_RUN_GATEWAYS
            err_msg = '{0} Retrieving gateways'.format(DRY_RUN_MSGS)
    except botocore.exceptions.ClientError as e:
        err_msg = str(e)
    return gateways_retrieved, err_msg, existing_gateways
def wait_for_status(client, wait_timeout, nat_gateway_id, status,
                    check_mode=False):
    """Poll a NAT gateway until it reaches *status* or the timeout expires.

    Args:
        client (botocore.client.EC2): Boto3 client.
        wait_timeout (int): Seconds to wait before giving up.
        nat_gateway_id (str): The Amazon NAT gateway id.
        status (str): The state to wait for, e.g. 'available' or 'deleted'.

    Kwargs:
        check_mode (bool): When True, canned dry-run data is used and the
            target status is assumed to be reached immediately.

    Returns:
        Tuple (bool, str, dict): (status reached, error message,
        gateway dict with snake_case keys).
    """
    polling_increment_secs = 5
    wait_timeout = time.time() + wait_timeout
    status_achieved = False
    nat_gateway = dict()
    # Query across all states so transitions (and failures) are visible.
    states = ['pending', 'failed', 'available', 'deleting', 'deleted']
    err_msg = ""
    while wait_timeout > time.time():
        try:
            gws_retrieved, err_msg, nat_gateways = (
                get_nat_gateways(
                    client, nat_gateway_id=nat_gateway_id,
                    states=states, check_mode=check_mode
                )
            )
            if gws_retrieved and nat_gateways:
                nat_gateway = nat_gateways[0]
                if check_mode:
                    # Dry-run data never changes state on its own; pretend
                    # the target state was reached.
                    nat_gateway['state'] = status
                if nat_gateway.get('state') == status:
                    status_achieved = True
                    break
                elif nat_gateway.get('state') == 'failed':
                    err_msg = nat_gateway.get('failure_message')
                    break
                elif nat_gateway.get('state') == 'pending':
                    # A failure message while pending means AWS gave up.
                    if 'failure_message' in nat_gateway:
                        err_msg = nat_gateway.get('failure_message')
                        status_achieved = False
                        break
            else:
                time.sleep(polling_increment_secs)
        except botocore.exceptions.ClientError as e:
            err_msg = str(e)
    if not status_achieved:
        err_msg = "Wait time out reached, while waiting for results"
    return status_achieved, err_msg, nat_gateway
def gateway_in_subnet_exists(client, subnet_id, allocation_id=None,
                             check_mode=False):
    """Find available/pending NAT gateways in a subnet.

    Args:
        client (botocore.client.EC2): Boto3 client.
        subnet_id (str): The subnet to search.

    Kwargs:
        allocation_id (str): When given, only gateways whose EIP matches
            this allocation id are returned.
        check_mode (bool): When True, canned dry-run data is used.

    Returns:
        Tuple (list, bool): (matching gateways, whether allocation_id is
        already attached to one of them).
    """
    allocation_id_exists = False
    gateways = []
    states = ['available', 'pending']
    gws_retrieved, _, gws = (
        get_nat_gateways(
            client, subnet_id, states=states, check_mode=check_mode
        )
    )
    if not gws_retrieved:
        return gateways, allocation_id_exists
    for gw in gws:
        for address in gw['nat_gateway_addresses']:
            if allocation_id:
                if address.get('allocation_id') == allocation_id:
                    allocation_id_exists = True
                    gateways.append(gw)
            else:
                gateways.append(gw)
    return gateways, allocation_id_exists
def get_eip_allocation_id_by_address(client, eip_address, check_mode=False):
    """Resolve an Elastic IP address to its VPC allocation id.

    Args:
        client (botocore.client.EC2): Boto3 client.
        eip_address (str): The Elastic IP address of the EIP.

    Kwargs:
        check_mode (bool): When True, canned dry-run data is used.

    Returns:
        Tuple (str, str): (allocation id or None, error message). The EIP
        must be VPC-scoped; classic EIPs produce an error.
    """
    params = {
        'PublicIps': [eip_address],
    }
    allocation_id = None
    err_msg = ""
    try:
        if not check_mode:
            allocations = client.describe_addresses(**params)['Addresses']
            if len(allocations) == 1:
                allocation = allocations[0]
            else:
                allocation = None
        else:
            dry_run_eip = (
                DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]['PublicIp']
            )
            if dry_run_eip == eip_address:
                allocation = DRY_RUN_ALLOCATION_UNCONVERTED['Addresses'][0]
            else:
                allocation = None
        if allocation:
            # NAT gateways require a VPC-scoped EIP, not an EC2-Classic one.
            if allocation.get('Domain') != 'vpc':
                err_msg = (
                    "EIP {0} is a non-VPC EIP, please allocate a VPC scoped EIP"
                    .format(eip_address)
                )
            else:
                allocation_id = allocation.get('AllocationId')
        else:
            err_msg = (
                "EIP {0} does not exist".format(eip_address)
            )
    except botocore.exceptions.ClientError as e:
        err_msg = str(e)
    return allocation_id, err_msg
def allocate_eip_address(client, check_mode=False):
    """Allocate a new VPC-scoped Elastic IP.

    Args:
        client (botocore.client.EC2): Boto3 client.

    Kwargs:
        check_mode (bool): When True, no AWS call is made; a fabricated
            allocation id is returned instead.

    Returns:
        Tuple (bool, str, str): (allocated, message, allocation id).
    """
    allocated = False
    eip_id = None
    message = ''
    request = {
        'Domain': 'vpc',
    }
    try:
        if check_mode:
            # Fabricate a plausible-looking allocation id for dry runs.
            suffix = (
                ''.join(str(x) for x in random.sample(range(0, 9), 7))
            )
            eip_id = 'eipalloc-{0}'.format(suffix)
            allocated = True
        else:
            eip_id = client.allocate_address(**request)['AllocationId']
            allocated = True
        message = 'eipalloc id {0} created'.format(eip_id)
    except botocore.exceptions.ClientError as e:
        message = str(e)
    return allocated, message, eip_id
def release_address(client, allocation_id, check_mode=False):
    """Release an Elastic IP allocation back to the pool.

    Args:
        client (botocore.client.EC2): Boto3 client.
        allocation_id (str): The EIP Amazon identifier.

    Kwargs:
        check_mode (bool): When True, report success without calling AWS.

    Returns:
        Tuple (bool, str): (released, error message).
    """
    if check_mode:
        return True, ''
    error = ''
    released = False
    # If the address can no longer be described it was most likely already
    # released (e.g. a deleted gateway that still lists associations);
    # treat that as success.
    try:
        client.describe_addresses(AllocationIds=[allocation_id])
    except botocore.exceptions.ClientError as e:
        return True, str(e)
    try:
        client.release_address(AllocationId=allocation_id)
        released = True
    except botocore.exceptions.ClientError as e:
        error = str(e)
    return released, error
def create(client, subnet_id, allocation_id, client_token=None,
           wait=False, wait_timeout=0, if_exist_do_not_create=False,
           check_mode=False):
    """Create an Amazon NAT Gateway in *subnet_id* using *allocation_id*.

    Args:
        client (botocore.client.EC2): Boto3 client.
        subnet_id (str): The subnet to create the gateway in.
        allocation_id (str): The EIP allocation to attach.

    Kwargs:
        client_token (str): Optional idempotency token. When AWS returns a
            gateway that predates this request, no change is reported.
        wait (bool): Wait for the gateway to become 'available'.
        wait_timeout (int): Seconds to wait before timing out.
        if_exist_do_not_create (bool): Accepted for signature parity with
            pre_create(); existence is checked by the caller.
        check_mode (bool): When True, fake dry-run data is returned and
            no AWS call is made.

    Returns:
        Tuple (bool, bool, str, dict): (success, changed, message, gateway).
    """
    params = {
        'SubnetId': subnet_id,
        'AllocationId': allocation_id
    }
    request_time = datetime.datetime.utcnow()
    changed = False
    success = False
    token_provided = False
    err_msg = ""

    if client_token:
        token_provided = True
        params['ClientToken'] = client_token

    try:
        if not check_mode:
            result = client.create_nat_gateway(**params)["NatGateway"]
        else:
            result = DRY_RUN_GATEWAY_UNCONVERTED[0]
            result['CreateTime'] = datetime.datetime.utcnow()
            result['NatGatewayAddresses'][0]['AllocationId'] = allocation_id
            result['SubnetId'] = subnet_id

        success = True
        changed = True
        create_time = result['CreateTime'].replace(tzinfo=None)
        if token_provided and (request_time > create_time):
            # AWS handed back a pre-existing gateway for this token, so
            # nothing was actually created on this run.
            changed = False
        elif wait:
            success, err_msg, result = (
                wait_for_status(
                    client, wait_timeout, result['NatGatewayId'], 'available',
                    check_mode=check_mode
                )
            )
            if success:
                err_msg = (
                    'NAT gateway {0} created'.format(result['nat_gateway_id'])
                )
    except botocore.exceptions.ClientError as e:
        # BUGFIX: ClientError has no `.message` attribute on Python 3
        # (BaseException.message was removed); inspect str(e) instead.
        if "IdempotentParameterMismatch" in str(e):
            err_msg = (
                'NAT Gateway does not support update and token has already been provided'
            )
        else:
            err_msg = str(e)
        success = False
        changed = False
        result = None

    return success, changed, err_msg, result
def pre_create(client, subnet_id, allocation_id=None, eip_address=None,
               if_exist_do_not_create=False, wait=False, wait_timeout=0,
               client_token=None, check_mode=False):
    """Resolve/allocate an EIP and create a NAT Gateway unless one exists.

    Args:
        client (botocore.client.EC2): Boto3 client.
        subnet_id (str): The subnet the gateway should reside in.

    Kwargs:
        allocation_id (str): The EIP Amazon identifier; when neither this
            nor eip_address is given, a new EIP is allocated.
        eip_address (str): An Elastic IP address to resolve to an
            allocation id.
        if_exist_do_not_create (bool): When a gateway already exists in
            the subnet, do not create another one.
        wait (bool): Wait for the gateway to become 'available'.
        wait_timeout (int): Seconds to wait before timing out.
        client_token (str): Optional idempotency token passed to create().
        check_mode (bool): When True, no AWS calls are made.

    Returns:
        Tuple (bool, bool, str, dict): (success, changed, message, gateway).
    """
    success = False
    changed = False
    err_msg = ""
    results = list()
    if not allocation_id and not eip_address:
        # No EIP specified: reuse an existing gateway if allowed,
        # otherwise allocate a fresh EIP for the new gateway.
        existing_gateways, allocation_id_exists = (
            gateway_in_subnet_exists(client, subnet_id, check_mode=check_mode)
        )
        if len(existing_gateways) > 0 and if_exist_do_not_create:
            success = True
            changed = False
            results = existing_gateways[0]
            err_msg = (
                'NAT Gateway {0} already exists in subnet_id {1}'
                .format(
                    existing_gateways[0]['nat_gateway_id'], subnet_id
                )
            )
            return success, changed, err_msg, results
        else:
            success, err_msg, allocation_id = (
                allocate_eip_address(client, check_mode=check_mode)
            )
            if not success:
                # NOTE(review): returns the *string* 'False' in the
                # `changed` position — looks like a bug; preserved as-is.
                return success, 'False', err_msg, dict()
    elif eip_address or allocation_id:
        if eip_address and not allocation_id:
            allocation_id, err_msg = (
                get_eip_allocation_id_by_address(
                    client, eip_address, check_mode=check_mode
                )
            )
            if not allocation_id:
                success = False
                changed = False
                return success, changed, err_msg, dict()
        # Reuse the existing gateway when the EIP is already attached to
        # one, or when the caller opted out of creating duplicates.
        existing_gateways, allocation_id_exists = (
            gateway_in_subnet_exists(
                client, subnet_id, allocation_id, check_mode=check_mode
            )
        )
        if len(existing_gateways) > 0 and (allocation_id_exists or if_exist_do_not_create):
            success = True
            changed = False
            results = existing_gateways[0]
            err_msg = (
                'NAT Gateway {0} already exists in subnet_id {1}'
                .format(
                    existing_gateways[0]['nat_gateway_id'], subnet_id
                )
            )
            return success, changed, err_msg, results
    success, changed, err_msg, results = create(
        client, subnet_id, allocation_id, client_token,
        wait, wait_timeout, if_exist_do_not_create, check_mode=check_mode
    )
    return success, changed, err_msg, results
def remove(client, nat_gateway_id, wait=False, wait_timeout=0,
           release_eip=False, check_mode=False):
    """Delete an Amazon NAT Gateway and optionally release its EIP.

    Args:
        client (botocore.client.EC2): Boto3 client.
        nat_gateway_id (str): The Amazon NAT gateway id.

    Kwargs:
        wait (bool): Wait for the gateway to reach the 'deleted' state.
        wait_timeout (int): Seconds to wait before timing out.
        release_eip (bool): After deletion, release the gateway's EIP
            back to the pool.
        check_mode (bool): When True, no AWS calls are made.

    Returns:
        Tuple (bool, bool, str, dict): (success, changed, message, gateway).
    """
    params = {
        'NatGatewayId': nat_gateway_id
    }
    success = False
    changed = False
    err_msg = ""
    results = list()
    states = ['pending', 'available']
    # BUGFIX: initialize so a missing gateway cannot trigger an
    # UnboundLocalError in the release_eip block below.
    allocation_id = None

    try:
        exist, _, gw = (
            get_nat_gateways(
                client, nat_gateway_id=nat_gateway_id,
                states=states, check_mode=check_mode
            )
        )
        if exist and len(gw) == 1:
            results = gw[0]
            if not check_mode:
                client.delete_nat_gateway(**params)

            # Remember the EIP so it can be released after deletion.
            allocation_id = (
                results['nat_gateway_addresses'][0]['allocation_id']
            )
            changed = True
            success = True
            err_msg = (
                'NAT gateway {0} is in a deleting state. Delete was successful'
                .format(nat_gateway_id)
            )

            if wait:
                status_achieved, err_msg, results = (
                    wait_for_status(
                        client, wait_timeout, nat_gateway_id, 'deleted',
                        check_mode=check_mode
                    )
                )
                if status_achieved:
                    err_msg = (
                        'NAT gateway {0} was deleted successfully'
                        .format(nat_gateway_id)
                    )
    except botocore.exceptions.ClientError as e:
        err_msg = str(e)

    if release_eip and allocation_id:
        eip_released, eip_err = (
            release_address(client, allocation_id, check_mode)
        )
        if not eip_released:
            err_msg = (
                "{0}: Failed to release EIP {1}: {2}"
                .format(err_msg, allocation_id, eip_err)
            )
            success = False

    return success, changed, err_msg, results
def main():
    """Ansible module entry point: create or delete a VPC NAT gateway."""
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            subnet_id=dict(type='str'),
            eip_address=dict(type='str'),
            allocation_id=dict(type='str'),
            if_exist_do_not_create=dict(type='bool', default=False),
            state=dict(default='present', choices=['present', 'absent']),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=320, required=False),
            release_eip=dict(type='bool', default=False),
            nat_gateway_id=dict(type='str'),
            client_token=dict(type='str'),
        )
    )
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True,
        mutually_exclusive=[
            ['allocation_id', 'eip_address']
        ]
    )
    # Validate Requirements
    if not HAS_BOTO3:
        module.fail_json(msg='botocore/boto3 is required.')
    state = module.params.get('state').lower()
    check_mode = module.check_mode
    subnet_id = module.params.get('subnet_id')
    allocation_id = module.params.get('allocation_id')
    eip_address = module.params.get('eip_address')
    nat_gateway_id = module.params.get('nat_gateway_id')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    release_eip = module.params.get('release_eip')
    client_token = module.params.get('client_token')
    if_exist_do_not_create = module.params.get('if_exist_do_not_create')
    try:
        region, ec2_url, aws_connect_kwargs = (
            get_aws_connection_info(module, boto3=True)
        )
        client = (
            boto3_conn(
                module, conn_type='client', resource='ec2',
                region=region, endpoint=ec2_url, **aws_connect_kwargs
            )
        )
    except botocore.exceptions.ClientError as e:
        # NOTE(review): ClientError has no `.msg` attribute; this handler
        # would itself raise on Python 3 — candidate bugfix.
        module.fail_json(msg="Boto3 Client Error - " + str(e.msg))
    changed = False
    err_msg = ''
    if state == 'present':
        if not subnet_id:
            module.fail_json(msg='subnet_id is required for creation')
        success, changed, err_msg, results = (
            pre_create(
                client, subnet_id, allocation_id, eip_address,
                if_exist_do_not_create, wait, wait_timeout,
                client_token, check_mode=check_mode
            )
        )
    else:
        if not nat_gateway_id:
            module.fail_json(msg='nat_gateway_id is required for removal')
        else:
            success, changed, err_msg, results = (
                remove(
                    client, nat_gateway_id, wait, wait_timeout, release_eip,
                    check_mode=check_mode
                )
            )
    if not success:
        module.fail_json(
            msg=err_msg, success=success, changed=changed
        )
    else:
        module.exit_json(
            msg=err_msg, success=success, changed=changed, **results
        )
if __name__ == '__main__':
    main()
| gpl-3.0 |
matteli/alterAB | alterab/referentiel/formate.py | 1 | 2127 | from oosheet import OOSheet as S
import json
import string
def formate():
    """Export every referential sheet to one JSON fixture file.

    Converts the simple sheets with tab2json(), the many-to-many matrix
    with tabm2m2json(), and writes the combined result to disk.
    """
    simple_sheets = (
        'Config_referentiel',
        'Savoir',
        'Groupe_savoir',
        'Competence',
        'Groupe_competence',
        'Tache',
        'Groupe_tache',
    )
    records = []
    for sheet in simple_sheets:
        records = tab2json(sheet, records)
    records = tabm2m2json('Competence_dans_tache', records)
    writeFile(records, '/home/matthieu/refjson.txt')
    return None
def tab2json(feuille, r):
    """Append one Django-fixture entry per data row of sheet *feuille* to *r*.

    Row 1 of the sheet holds the field names, column A the primary key.
    Scanning stops at the first empty cell in column A (rows) or row 1
    (columns).  Python 2 only (``unicode``, ``string.lowercase``).
    """
    j=2
    while S(feuille + '.a' + unicode(j)).string != "":
        num = S(feuille + '.a' + unicode(j)).string
        i=1
        field={}
        # Collect every named column of the current row (b, c, d, ...).
        while S(feuille + '.' + string.lowercase[i] + '1').string != "":
            field[S(feuille + '.' + string.lowercase[i] + '1').string]=S(feuille + '.' + string.lowercase[i] + unicode(j)).string
            i=i+1
        j=j+1
        # Django fixture format: model label, primary key, field mapping.
        entry = {
            u"model":u"suivi." + feuille,
            u"pk":num,
            u"fields":field,
        }
        r.append(entry)
    return r
def tabm2m2json(feuille, r):
    """Append many-to-many fixture entries from matrix sheet *feuille* to *r*.

    Cell A2 names the row-side field, B1 the column-side field and B2 the
    relation field.  A positive numeric cell at (row i, column j) produces
    one entry linking row ``i-2`` to column ``j-1``.  A ``""`` or ``"0"``
    header cell terminates the scan.  Python 2 only.
    """
    tab1=S(feuille + '.a2').string
    tab2=S(feuille + '.b1').string
    rel=S(feuille + '.b2').string
    pk=0
    i=3
    while S(feuille + '.a' + unicode(i)).string != "" and S(feuille + '.a' + unicode(i)).string != "0":
        j=2
        while S(feuille + '.' + string.lowercase[j] + '1').string != "" and S(feuille + '.' + string.lowercase[j] + '1').string != "0" :
            try:
                num = int(float(S(feuille + '.' + string.lowercase[j] + unicode(i)).string))
            except:
                # Non-numeric / empty cells count as "no relation".
                num = 0
            if num>0:
                # pk is a running counter across the whole matrix.
                pk=pk+1
                entry = {
                    u"model":u"suivi." + feuille,
                    u"pk":pk,
                    u"fields":{
                        rel:num,
                        tab1:i-2,
                        tab2:j-1,
                    },
                }
                r.append(entry)
            j=j+1
        i=i+1
    return r
def writeFile(source, name):
    """Serialize *source* to JSON and write it to the file *name*.

    Bug fix: the original called ``f.closed`` — an attribute lookup, not a
    call — so the file handle was never explicitly closed.  A ``with``
    block now guarantees the handle is closed even if the write fails.
    """
    rjson = json.JSONEncoder().encode(source)
    with open(name, 'w') as f:
        f.write(rjson)
    return None
| gpl-2.0 |
Metaswitch/horizon | openstack_dashboard/dashboards/project/access_and_security/api_access/tests.py | 13 | 3260 | # Copyright 2012 Nebula Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.http import HttpRequest # noqa
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
# Reversed URL names for the "API Access" panel views exercised below.
API_URL = "horizon:project:access_and_security:api_access"
EC2_URL = reverse(API_URL + ":ec2")
OPENRC_URL = reverse(API_URL + ":openrc")
CREDS_URL = reverse(API_URL + ":view_credentials")
class APIAccessTests(test.TestCase):
    """Tests for the project "API Access" panel views (mox record/replay)."""

    def test_ec2_download_view(self):
        # No existing EC2 credentials are returned, so the view must create
        # a fresh pair and package it with the x509 certs as a zip download.
        creds = self.ec2.first()
        cert = self.certs.first()
        self.mox.StubOutWithMock(api.keystone, "list_ec2_credentials")
        self.mox.StubOutWithMock(api.nova, "get_x509_credentials")
        self.mox.StubOutWithMock(api.nova, "get_x509_root_certificate")
        self.mox.StubOutWithMock(api.keystone, "create_ec2_credentials")
        api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \
            .AndReturn([])
        api.nova.get_x509_credentials(IsA(HttpRequest)).AndReturn(cert)
        api.nova.get_x509_root_certificate(IsA(HttpRequest)) \
            .AndReturn(cert)
        api.keystone.create_ec2_credentials(IsA(HttpRequest),
                                            self.user.id,
                                            self.tenant.id).AndReturn(creds)
        self.mox.ReplayAll()
        res = self.client.get(EC2_URL)
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res['content-type'], 'application/zip')

    def test_openrc_credentials(self):
        # The rendered openrc template must embed the user name and tenant id.
        res = self.client.get(OPENRC_URL)
        self.assertEqual(res.status_code, 200)
        openrc = 'project/access_and_security/api_access/openrc.sh.template'
        self.assertTemplateUsed(res, openrc)
        name = 'export OS_USERNAME="{}"'.format(self.request.user.username)
        id = 'export OS_TENANT_ID={}'.format(self.request.user.tenant_id)
        self.assertTrue(name in res.content)
        self.assertTrue(id in res.content)

    @test.create_stubs({api.keystone: ("list_ec2_credentials",)})
    def test_credential_api(self):
        # The credentials page exposes both openrc and EC2 credentials in
        # its template context.
        certs = self.ec2.list()
        api.keystone.list_ec2_credentials(IsA(HttpRequest), self.user.id) \
            .AndReturn(certs)
        self.mox.ReplayAll()
        res = self.client.get(CREDS_URL)
        self.assertEqual(res.status_code, 200)
        credentials = 'project/access_and_security/api_access/credentials.html'
        self.assertTemplateUsed(res, credentials)
        self.assertEqual(self.user.id, res.context['openrc_creds']['user'].id)
        self.assertEqual(certs[0].access,
                         res.context['ec2_creds']['ec2_access_key'])
| apache-2.0 |
barakschiller/experiment | experiment/api/services/assignment_service.py | 1 | 1391 | from api.services import escape_name
class AssignmentService(object):
    """Persists experiment variant assignments through a key/value storage."""

    def __init__(self, storage):
        self.storage = storage

    def assign(self, experiment, entity):
        """Return the variant for *entity*, assigning one on first use.

        :type experiment: experiment.core.experiment.Experiment
        """
        key = self._key(experiment.name, entity)
        previous = self.storage.get(key)
        if previous is not None:
            return previous
        variant = experiment.assign(entity)
        self.storage.store(key, variant)
        return variant

    def manual_assign(self, experiment, entity, variant_name):
        """Force *variant_name* for *entity*, overriding any prior assignment.

        :type experiment: experiment.core.experiment.Experiment
        """
        if variant_name not in experiment.variant_names:
            raise ValueError("Invalid variant")
        key = self._key(experiment.name, entity)
        if self.storage.get(key) is None:
            self.storage.store(key, variant_name)
        else:
            self.storage.update(key, variant_name)
        return variant_name

    @staticmethod
    def _key(experiment_name, entity_name):
        return 'experiment:{}:entity:{}'.format(escape_name(experiment_name), escape_name(entity_name))
| bsd-3-clause |
thaim/ansible | lib/ansible/modules/remote_management/ucs/ucs_org.py | 18 | 6663 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: ucs_org
short_description: Manages UCS Organizations for UCS Manager
description:
- Manages UCS Organizations for UCS Manager.
- Examples can be used with the UCS Platform Emulator U(https://cs.co/ucspe).
extends_documentation_fragment: ucs
options:
state:
description:
- If C(absent), will remove organization.
- If C(present), will create or update organization.
choices: [absent, present]
default: present
type: str
org_name:
description:
- The name of the organization.
- Enter up to 16 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ name ]
type: str
parent_org_path:
description:
- A forward slash / separated hierarchical path from the root organization to the parent of the organization to be added or updated.
- UCS Manager supports a hierarchical structure of organizations up to five levels deep not including the root organization.
- For example the parent_org_path for an organization named level5 could be root/level1/level2/level3/level4/level5
default: root
type: str
description:
description:
- A user-defined description of the organization.
- Enter up to 256 characters.
- "You can use any characters or spaces except the following:"
- "` (accent mark), \ (backslash), ^ (carat), \" (double quote), = (equal sign), > (greater than), < (less than), or ' (single quote)."
aliases: [ descr ]
type: str
delegate_to:
description:
- Where the module will be run
default: localhost
type: str
requirements:
- ucsmsdk
author:
- John McDonough (@movinalot)
- CiscoUcs (@CiscoUcs)
version_added: "2.8"
'''
EXAMPLES = r'''
- name: Add UCS Organization
ucs_org:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
org_name: test
description: testing org
state: present
delegate_to: localhost
- name: Update UCS Organization
ucs_org:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
org_name: test
description: Testing org
state: present
delegate_to: localhost
- name: Add UCS Organization
ucs_org:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
org_name: level1
parent_org_path: root
description: level1 org
state: present
delegate_to: localhost
- name: Add UCS Organization
ucs_org:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
org_name: level2
parent_org_path: root/level1
description: level2 org
state: present
- name: Add UCS Organization
ucs_org:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
org_name: level3
parent_org_path: root/level1/level2
description: level3 org
state: present
- name: Remove UCS Organization
ucs_org:
hostname: "{{ ucs_hostname }}"
username: "{{ ucs_username }}"
password: "{{ ucs_password }}"
org_name: level2
parent_org_path: root/level1/
state: absent
'''
RETURN = r'''
#
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.remote_management.ucs import UCSModule, ucs_argument_spec
def main():
    """Ansible entry point: ensure a UCS organization is present or absent."""
    # Bug fix: ``ucs_argument_spec`` is a module-level dict shared by every
    # UCS module; calling .update() on it directly mutates that shared
    # state.  Work on a shallow copy instead.
    argument_spec = dict(ucs_argument_spec)
    argument_spec.update(
        org_name=dict(type='str', aliases=['name']),
        parent_org_path=dict(type='str', default='root'),
        description=dict(type='str', aliases=['descr']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        delegate_to=dict(type='str', default='localhost'),
    )
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        required_if=[
            ['state', 'present', ['org_name']],
        ],
    )
    # UCSModule verifies ucsmsdk is present and exits on failure.
    # Imports are below for UCS object creation.
    ucs = UCSModule(module)
    from ucsmsdk.mometa.org.OrgOrg import OrgOrg

    err = False
    changed = False
    requested_state = module.params['state']

    # Only pass descr through when the user supplied a description.
    kwargs = dict()
    if module.params['description'] is not None:
        kwargs['descr'] = module.params['description']

    try:
        # DNs look like org-root/org-level1/.../org-<name>.
        parent_org_dn = 'org-' + module.params['parent_org_path'].replace('/', '/org-')
        dn = parent_org_dn + '/org-' + module.params['org_name']

        mo = ucs.login_handle.query_dn(dn)

        # Determine state change
        if mo:
            # Object exists, if it should exist has anything changed?
            if requested_state == 'present':
                # Do some or all Object properties not match, that is a change
                if not mo.check_prop_match(**kwargs):
                    changed = True
        # Object does not exist but should, that is a change
        else:
            if requested_state == 'present':
                changed = True

        # Object exists but should not, that is a change
        if mo and requested_state == 'absent':
            changed = True

        # Apply state if not check_mode
        if changed and not module.check_mode:
            if requested_state == 'absent':
                ucs.login_handle.remove_mo(mo)
            else:
                # kwargs already carries descr (set above); add the DN parts.
                kwargs['parent_mo_or_dn'] = parent_org_dn
                kwargs['name'] = module.params['org_name']
                mo = OrgOrg(**kwargs)
                ucs.login_handle.add_mo(mo, modify_present=True)
            ucs.login_handle.commit()
    except Exception as e:
        err = True
        ucs.result['msg'] = "setup error: %s " % str(e)

    ucs.result['changed'] = changed

    if err:
        module.fail_json(**ucs.result)
    module.exit_json(**ucs.result)


if __name__ == '__main__':
    main()
| mit |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Lib/curses/__init__.py | 61 | 1874 | """curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initwin()
...
"""
__revision__ = "$Id: __init__.py 61064 2008-02-25 16:29:58Z andrew.kuchling $"
from _curses import *
from curses.wrapper import wrapper
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
    """Initialize curses and return the top-level window object.

    Wraps the C-level ``_curses.initscr()`` and afterwards copies the
    ACS_* / LINES / COLS constants — which the C module only defines after
    initialization — into this package's namespace.
    """
    import _curses, curses
    # we call setupterm() here because it raises an error
    # instead of calling exit() in error cases.
    setupterm(term=_os.environ.get("TERM", "unknown"),
              fd=_sys.__stdout__.fileno())
    stdscr = _curses.initscr()
    for key, value in _curses.__dict__.items():
        if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
            setattr(curses, key, value)
    return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
    """Wrap ``_curses.start_color()`` and export COLORS / COLOR_PAIRS.

    These constants only become available in the C module after
    start_color() has been called, hence the hasattr guards.
    """
    import _curses, curses
    retval = _curses.start_color()
    if hasattr(_curses, 'COLORS'):
        curses.COLORS = _curses.COLORS
    if hasattr(_curses, 'COLOR_PAIRS'):
        curses.COLOR_PAIRS = _curses.COLOR_PAIRS
    return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from has_key import has_key
| apache-2.0 |
danielharbor/openerp | build/lib/openerp/report/render/rml.py | 457 | 3244 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import render
import rml2pdf
import rml2html as htmlizer
import rml2txt as txtizer
import odt2odt as odt
import html2html as html
import makohtml2html as makohtml
class rml(render.render):
    """Render an RML document to PDF via rml2pdf."""

    def __init__(self, rml, localcontext = None, datas=None, path='.', title=None):
        render.render.__init__(self, datas, path)
        self.localcontext = localcontext
        self.rml = rml
        self.output_type = 'pdf'
        self.title=title

    def _render(self):
        return rml2pdf.parseNode(self.rml, self.localcontext, images=self.bin_datas, path=self.path,title=self.title)
class rml2html(render.render):
    """Render an RML document to HTML via the htmlizer backend."""

    def __init__(self, rml,localcontext = None, datas=None):
        super(rml2html, self).__init__(datas)
        self.rml = rml
        self.localcontext = localcontext
        self.output_type = 'html'

    def _render(self):
        return htmlizer.parseString(self.rml,self.localcontext)
class rml2txt(render.render):
    """Render an RML document to plain text via the txtizer backend."""

    def __init__(self, rml, localcontext= None, datas=None):
        super(rml2txt, self).__init__(datas)
        self.rml = rml
        self.localcontext = localcontext
        self.output_type = 'txt'

    def _render(self):
        return txtizer.parseString(self.rml, self.localcontext)
class odt2odt(render.render):
    """Render an ODT template (as a parsed DOM node) to ODT."""

    def __init__(self, rml, localcontext=None, datas=None):
        render.render.__init__(self, datas)
        self.rml_dom = rml
        self.localcontext = localcontext
        self.output_type = 'odt'

    def _render(self):
        return odt.parseNode(self.rml_dom,self.localcontext)
class html2html(render.render):
    """Render an HTML template (as a parsed DOM node) back to HTML."""

    def __init__(self, rml, localcontext=None, datas=None):
        render.render.__init__(self, datas)
        self.rml_dom = rml
        self.localcontext = localcontext
        self.output_type = 'html'

    def _render(self):
        return html.parseString(self.rml_dom,self.localcontext)
class makohtml2html(render.render):
    """Render a Mako HTML template to HTML.  Note: takes no datas argument."""

    def __init__(self, html, localcontext = None):
        render.render.__init__(self)
        self.html = html
        self.localcontext = localcontext
        self.output_type = 'html'

    def _render(self):
        return makohtml.parseNode(self.html,self.localcontext)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mcgonagle/ansible_f5 | library_old/bigip_config.py | 3 | 10671 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_config
short_description: Manage BIG-IP configuration sections
description:
- Manages a BIG-IP configuration by allowing TMSH commands that
modify running configuration, or,
version_added: "2.4"
options:
save:
description:
- The C(save) argument instructs the module to save the
running-config to startup-config. This operation is performed
after any changes are made to the current running config. If
no changes are made, the configuration is still saved to the
startup config. This option will always cause the module to
return changed.
choices:
- yes
- no
required: False
default: False
reset:
description:
- Loads the default configuration on the device. If this option
is specified, the default configuration will be loaded before
any commands or other provided configuration is run.
required: False
default: False
merge_content:
description:
- Loads the specified configuration that you want to merge into
the running configuration. This is equivalent to using the
C(tmsh) command C(load sys config from-terminal merge). If
you need to read configuration from a file or template, use
Ansible's C(file) or C(template) lookup plugins respectively.
verify:
description:
- Validates the specified configuration to see whether they are
valid to replace the running configuration. The running
configuration will not be changed.
required: False
default: True
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
- Requires Ansible >= 2.3.
requirements:
- f5-sdk >= 2.2.3
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Save the running configuration of the BIG-IP
bigip_config:
save: yes
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
- name: Reset the BIG-IP configuration, for example, to RMA the device
bigip_config:
reset: yes
save: yes
server: "lb.mydomain.com"
password: "secret"
user: "admin"
validate_certs: "no"
delegate_to: localhost
'''
RETURN = '''
stdout:
description: The set of responses from the options
returned: always
type: list
sample: ['...', '...']
stdout_lines:
description: The value of stdout split into a list
returned: always
type: list
sample: [['...', '...'], ['...'], ['...']]
'''
import os
import tempfile
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from ansible.module_utils.f5_utils import *
from ansible.module_utils.basic import BOOLEANS
class Parameters(AnsibleF5Parameters):
    """Holds module parameters; routes API names through mapped properties."""

    # Keys copied into the module result by to_return().
    returnables = ['stdout', 'stdout_lines']

    def __init__(self, params=None):
        # Unset parameters read as None instead of raising KeyError.
        self._values = defaultdict(lambda: None)
        if params:
            self.update(params)

    def to_return(self):
        """Return a dict of the returnable values, filtered of empties."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result

    def update(self, params=None):
        """Load *params*, translating API names and honoring property setters."""
        if params:
            for k,v in iteritems(params):
                if self.api_map is not None and k in self.api_map:
                    map_key = self.api_map[k]
                else:
                    map_key = k
                # Handle weird API parameters like `dns.proxy.__iter__` by
                # using a map provided by the module developer
                class_attr = getattr(type(self), map_key, None)
                if isinstance(class_attr, property):
                    # There is a mapped value for the api_map key
                    if class_attr.fset is None:
                        # If the mapped value does not have an associated setter
                        self._values[map_key] = v
                    else:
                        # The mapped value has a setter
                        setattr(self, map_key, v)
                else:
                    # If the mapped value is not a @property
                    self._values[map_key] = v
class ModuleManager(object):
    """Runs the requested config operations (reset/merge/save) on a BIG-IP."""

    def __init__(self, client):
        self.client = client
        self.want = Parameters(self.client.module.params)
        self.changes = Parameters()

    def _set_changed_options(self):
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Parameters(changed)

    def _to_lines(self, stdout):
        # Split each textual response into a list of lines for stdout_lines.
        lines = list()
        for item in stdout:
            if isinstance(item, string_types):
                item = str(item).split('\n')
            lines.append(item)
        return lines

    def exec_module(self):
        """Execute the module and return the Ansible result dictionary."""
        result = dict()
        try:
            self.execute()
        except iControlUnexpectedHTTPError as e:
            raise F5ModuleError(str(e))
        result.update(**self.changes.to_return())
        # This module always reports changed=True (documented behavior of
        # the `save` option).
        result.update(dict(changed=True))
        return result

    def execute(self):
        # Fixed order of operations: reset first, then merge, then save;
        # the tmsh output of each step is collected for the result.
        responses = []
        if self.want.reset:
            response = self.reset()
            responses.append(response)
        if self.want.merge_content:
            if self.want.verify:
                response = self.merge(verify=True)
                responses.append(response)
            else:
                response = self.merge(verify=False)
                responses.append(response)
        if self.want.save:
            response = self.save()
            responses.append(response)
        self.changes = Parameters({
            'stdout': responses,
            'stdout_lines': self._to_lines(responses)
        })

    def reset(self):
        if self.client.check_mode:
            return True
        return self.reset_device()

    def reset_device(self):
        """Load the factory-default configuration via tmsh."""
        command = 'tmsh load sys config default'
        output = self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(command)
        )
        if hasattr(output, 'commandResult'):
            return str(output.commandResult)
        return None

    def merge(self, verify=True):
        # NOTE(review): tempfile._get_candidate_names() is a private stdlib
        # API used only to pick a random remote file name; no local file is
        # created.
        temp_name = next(tempfile._get_candidate_names())
        remote_path = "/var/config/rest/downloads/{0}".format(temp_name)
        temp_path = '/tmp/' + temp_name
        if self.client.check_mode:
            return True
        self.upload_to_device(temp_name)
        self.move_on_device(remote_path)
        response = self.merge_on_device(
            remote_path=temp_path, verify=verify
        )
        self.remove_temporary_file(remote_path=temp_path)
        return response

    def merge_on_device(self, remote_path, verify=True):
        """Merge the uploaded file into the running config; return tmsh output.

        Bug fix: removed a stray Python 2 ``print command`` debug statement.
        Printing to stdout corrupts the JSON payload an Ansible module must
        emit, and the statement form is a syntax error under Python 3.
        """
        result = None
        command = 'tmsh load sys config file {0} merge'.format(
            remote_path
        )
        if verify:
            # verify mode validates the config without changing the device.
            command += ' verify'
        output = self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(command)
        )
        if hasattr(output, 'commandResult'):
            result = str(output.commandResult)
        return result

    def remove_temporary_file(self, remote_path):
        # Clean up the uploaded config file on the device.
        self.client.api.tm.util.unix_rm.exec_cmd(
            'run',
            utilCmdArgs=remote_path
        )

    def move_on_device(self, remote_path):
        self.client.api.tm.util.unix_mv.exec_cmd(
            'run',
            utilCmdArgs='{0} /tmp/{1}'.format(
                remote_path, os.path.basename(remote_path)
            )
        )

    def upload_to_device(self, temp_name):
        # merge_content arrives as a string; wrap it in a file-like object
        # for the REST upload helper.
        template = StringIO(self.want.merge_content)
        upload = self.client.api.shared.file_transfer.uploads
        upload.upload_stringio(template, temp_name)

    def save(self):
        if self.client.check_mode:
            return True
        return self.save_on_device()

    def save_on_device(self):
        """Persist the running configuration to the startup configuration."""
        result = None
        command = 'tmsh save sys config'
        output = self.client.api.tm.util.bash.exec_cmd(
            'run',
            utilCmdArgs='-c "{0}"'.format(command)
        )
        if hasattr(output, 'commandResult'):
            result = str(output.commandResult)
        return result
class ArgumentSpec(object):
    """Declares the bigip_config argument spec and module capabilities."""

    def __init__(self):
        # Shared template for the boolean options; each entry below copies
        # it and adds its own default.
        bool_opt = dict(type='bool', required=False, choices=BOOLEANS)
        self.argument_spec = {
            'reset': dict(bool_opt, default=False),
            'merge_content': dict(required=False, default=None, type='str'),
            'verify': dict(bool_opt, default=True),
            'save': dict(bool_opt, default=False),
        }
        self.supports_check_mode = True
        self.f5_product_name = 'bigip'
def main():
    """Ansible entry point: build the F5 client, run ModuleManager, exit."""
    if not HAS_F5SDK:
        raise F5ModuleError("The python f5-sdk module is required")
    spec = ArgumentSpec()
    client = AnsibleF5Client(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        f5_product_name=spec.f5_product_name
    )
    try:
        mm = ModuleManager(client)
        results = mm.exec_module()
        client.module.exit_json(**results)
    except F5ModuleError as e:
        # Translate module-level failures into a proper Ansible failure.
        client.module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| apache-2.0 |
ZSeaPeng/python | PyBeaner/0007/code_lines.py | 36 | 1086 | # coding=utf-8
__author__ = 'PyBeaner'
import os
import fnmatch
# Module-level counters mutated by count_line(); scan_dir() accumulates
# totals across every matching file.
total_lines = 0
code_lines = 0
empty_lines = 0
comment_lines = 0
def count_line(line):
    """Classify one source line and bump the matching module-level counter.

    A line is a comment if its first non-whitespace character is '#',
    empty if it contains only whitespace, and code otherwise.
    """
    global comment_lines, empty_lines, total_lines, code_lines
    stripped = line.lstrip()
    total_lines += 1
    if stripped.startswith("#"):
        comment_lines += 1
    elif not stripped:
        empty_lines += 1
    else:
        code_lines += 1
def scan_dir(directory, suffix="*.py"):
    """Recursively walk *directory* and count the lines of every *suffix* file."""
    root = os.path.abspath(directory)
    print("Scanning files in %s ..." % root)
    for dirpath, _subdirs, filenames in os.walk(root):
        matching = (name for name in filenames if fnmatch.fnmatch(name, suffix))
        for name in matching:
            full_path = os.path.join(dirpath, name)
            # errors="replace" keeps the scan alive on badly-encoded files.
            with open(full_path, errors="replace") as handle:
                for line in handle:
                    count_line(line)
if __name__ == '__main__':
    # Count the repository two levels above this script, then report totals.
    scan_dir(r"../..")
    print("Total lines:%d" % total_lines)
    print("Code lines:%d" % code_lines)
    print("Empty lines:%d" % empty_lines)
    print("Comment lines:%d" % comment_lines)
| mit |
arnavd96/Cinemiezer | myvenv/lib/python3.4/site-packages/unidecode/x074.py | 252 | 4696 | data = (
'Han ', # 0x00
'Xuan ', # 0x01
'Yan ', # 0x02
'Qiu ', # 0x03
'Quan ', # 0x04
'Lang ', # 0x05
'Li ', # 0x06
'Xiu ', # 0x07
'Fu ', # 0x08
'Liu ', # 0x09
'Ye ', # 0x0a
'Xi ', # 0x0b
'Ling ', # 0x0c
'Li ', # 0x0d
'Jin ', # 0x0e
'Lian ', # 0x0f
'Suo ', # 0x10
'Chiisai ', # 0x11
'[?] ', # 0x12
'Wan ', # 0x13
'Dian ', # 0x14
'Pin ', # 0x15
'Zhan ', # 0x16
'Cui ', # 0x17
'Min ', # 0x18
'Yu ', # 0x19
'Ju ', # 0x1a
'Chen ', # 0x1b
'Lai ', # 0x1c
'Wen ', # 0x1d
'Sheng ', # 0x1e
'Wei ', # 0x1f
'Dian ', # 0x20
'Chu ', # 0x21
'Zhuo ', # 0x22
'Pei ', # 0x23
'Cheng ', # 0x24
'Hu ', # 0x25
'Qi ', # 0x26
'E ', # 0x27
'Kun ', # 0x28
'Chang ', # 0x29
'Qi ', # 0x2a
'Beng ', # 0x2b
'Wan ', # 0x2c
'Lu ', # 0x2d
'Cong ', # 0x2e
'Guan ', # 0x2f
'Yan ', # 0x30
'Diao ', # 0x31
'Bei ', # 0x32
'Lin ', # 0x33
'Qin ', # 0x34
'Pi ', # 0x35
'Pa ', # 0x36
'Que ', # 0x37
'Zhuo ', # 0x38
'Qin ', # 0x39
'Fa ', # 0x3a
'[?] ', # 0x3b
'Qiong ', # 0x3c
'Du ', # 0x3d
'Jie ', # 0x3e
'Hun ', # 0x3f
'Yu ', # 0x40
'Mao ', # 0x41
'Mei ', # 0x42
'Chun ', # 0x43
'Xuan ', # 0x44
'Ti ', # 0x45
'Xing ', # 0x46
'Dai ', # 0x47
'Rou ', # 0x48
'Min ', # 0x49
'Zhen ', # 0x4a
'Wei ', # 0x4b
'Ruan ', # 0x4c
'Huan ', # 0x4d
'Jie ', # 0x4e
'Chuan ', # 0x4f
'Jian ', # 0x50
'Zhuan ', # 0x51
'Yang ', # 0x52
'Lian ', # 0x53
'Quan ', # 0x54
'Xia ', # 0x55
'Duan ', # 0x56
'Yuan ', # 0x57
'Ye ', # 0x58
'Nao ', # 0x59
'Hu ', # 0x5a
'Ying ', # 0x5b
'Yu ', # 0x5c
'Huang ', # 0x5d
'Rui ', # 0x5e
'Se ', # 0x5f
'Liu ', # 0x60
'Shi ', # 0x61
'Rong ', # 0x62
'Suo ', # 0x63
'Yao ', # 0x64
'Wen ', # 0x65
'Wu ', # 0x66
'Jin ', # 0x67
'Jin ', # 0x68
'Ying ', # 0x69
'Ma ', # 0x6a
'Tao ', # 0x6b
'Liu ', # 0x6c
'Tang ', # 0x6d
'Li ', # 0x6e
'Lang ', # 0x6f
'Gui ', # 0x70
'Zhen ', # 0x71
'Qiang ', # 0x72
'Cuo ', # 0x73
'Jue ', # 0x74
'Zhao ', # 0x75
'Yao ', # 0x76
'Ai ', # 0x77
'Bin ', # 0x78
'Tu ', # 0x79
'Chang ', # 0x7a
'Kun ', # 0x7b
'Zhuan ', # 0x7c
'Cong ', # 0x7d
'Jin ', # 0x7e
'Yi ', # 0x7f
'Cui ', # 0x80
'Cong ', # 0x81
'Qi ', # 0x82
'Li ', # 0x83
'Ying ', # 0x84
'Suo ', # 0x85
'Qiu ', # 0x86
'Xuan ', # 0x87
'Ao ', # 0x88
'Lian ', # 0x89
'Man ', # 0x8a
'Zhang ', # 0x8b
'Yin ', # 0x8c
'[?] ', # 0x8d
'Ying ', # 0x8e
'Zhi ', # 0x8f
'Lu ', # 0x90
'Wu ', # 0x91
'Deng ', # 0x92
'Xiou ', # 0x93
'Zeng ', # 0x94
'Xun ', # 0x95
'Qu ', # 0x96
'Dang ', # 0x97
'Lin ', # 0x98
'Liao ', # 0x99
'Qiong ', # 0x9a
'Su ', # 0x9b
'Huang ', # 0x9c
'Gui ', # 0x9d
'Pu ', # 0x9e
'Jing ', # 0x9f
'Fan ', # 0xa0
'Jin ', # 0xa1
'Liu ', # 0xa2
'Ji ', # 0xa3
'[?] ', # 0xa4
'Jing ', # 0xa5
'Ai ', # 0xa6
'Bi ', # 0xa7
'Can ', # 0xa8
'Qu ', # 0xa9
'Zao ', # 0xaa
'Dang ', # 0xab
'Jiao ', # 0xac
'Gun ', # 0xad
'Tan ', # 0xae
'Hui ', # 0xaf
'Huan ', # 0xb0
'Se ', # 0xb1
'Sui ', # 0xb2
'Tian ', # 0xb3
'[?] ', # 0xb4
'Yu ', # 0xb5
'Jin ', # 0xb6
'Lu ', # 0xb7
'Bin ', # 0xb8
'Shou ', # 0xb9
'Wen ', # 0xba
'Zui ', # 0xbb
'Lan ', # 0xbc
'Xi ', # 0xbd
'Ji ', # 0xbe
'Xuan ', # 0xbf
'Ruan ', # 0xc0
'Huo ', # 0xc1
'Gai ', # 0xc2
'Lei ', # 0xc3
'Du ', # 0xc4
'Li ', # 0xc5
'Zhi ', # 0xc6
'Rou ', # 0xc7
'Li ', # 0xc8
'Zan ', # 0xc9
'Qiong ', # 0xca
'Zhe ', # 0xcb
'Gui ', # 0xcc
'Sui ', # 0xcd
'La ', # 0xce
'Long ', # 0xcf
'Lu ', # 0xd0
'Li ', # 0xd1
'Zan ', # 0xd2
'Lan ', # 0xd3
'Ying ', # 0xd4
'Mi ', # 0xd5
'Xiang ', # 0xd6
'Xi ', # 0xd7
'Guan ', # 0xd8
'Dao ', # 0xd9
'Zan ', # 0xda
'Huan ', # 0xdb
'Gua ', # 0xdc
'Bo ', # 0xdd
'Die ', # 0xde
'Bao ', # 0xdf
'Hu ', # 0xe0
'Zhi ', # 0xe1
'Piao ', # 0xe2
'Ban ', # 0xe3
'Rang ', # 0xe4
'Li ', # 0xe5
'Wa ', # 0xe6
'Dekaguramu ', # 0xe7
'Jiang ', # 0xe8
'Qian ', # 0xe9
'Fan ', # 0xea
'Pen ', # 0xeb
'Fang ', # 0xec
'Dan ', # 0xed
'Weng ', # 0xee
'Ou ', # 0xef
'Deshiguramu ', # 0xf0
'Miriguramu ', # 0xf1
'Thon ', # 0xf2
'Hu ', # 0xf3
'Ling ', # 0xf4
'Yi ', # 0xf5
'Ping ', # 0xf6
'Ci ', # 0xf7
'Hekutogura ', # 0xf8
'Juan ', # 0xf9
'Chang ', # 0xfa
'Chi ', # 0xfb
'Sarake ', # 0xfc
'Dang ', # 0xfd
'Meng ', # 0xfe
'Pou ', # 0xff
)
| mit |
Arakmar/Sick-Beard | tests/all_tests.py | 22 | 1454 | #!/usr/bin/env python
# coding=UTF-8
# Author: Dennis Lutter <lad1337@gmail.com>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Fountion, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
if __name__ == "__main__":
    import glob
    import unittest
    # Collect every "*_tests.py" sibling (excluding this file), strip the
    # ".py" extension to get module names, and run them as one combined
    # suite.  Python 2 only (print statements).
    test_file_strings = [ x for x in glob.glob('*_tests.py') if not x in __file__]
    module_strings = [file_string[0:len(file_string) - 3] for file_string in test_file_strings]
    suites = [unittest.defaultTestLoader.loadTestsFromName(file_string) for file_string in module_strings]
    testSuite = unittest.TestSuite(suites)
    print "=================="
    print "STARTING - ALL TESTS"
    print "=================="
    print "this will include"
    for includedfiles in test_file_strings:
        print "- " + includedfiles
    text_runner = unittest.TextTestRunner().run(testSuite)
| gpl-3.0 |
tkasp/osmose-backend | plugins/Administrative_TooManyWays.py | 4 | 3906 | #-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Yoann Arnaud <yarnaud@crans.org> 2009 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
from modules.OsmoseTranslation import T_
from plugins.Plugin import Plugin
class Administrative_TooManyWays(Plugin):
    """Flag administrative boundary relations containing the same way twice."""

    def init(self, logger):
        Plugin.init(self, logger)
        # Error class 504: a way appears more than once in a boundary relation.
        self.errors[504] = self.def_class(item = 6020, level = 3, tags = ['boundary', 'fix:chair'],
            title = T_('Duplicated way in relation'),
            detail = T_(
'''In a relation, a way should be present only once.'''),
            fix = T_(
'''Most often, this is a user issue that added several times the same way.
The editor JOSM can easily visualize the relationships and see duplicates
(in color).'''),
            trap = T_(
'''Double check the ways roles in the relation before deleting.
Caution: in a route, a path can be taken several times. The multiple
presence of this path in the relation `type=route` is not then an issue.
Then ensure the roles `forward` and `backward`.'''))

    def relation(self, data, tags, members):
        # Only administrative boundary relations are checked.
        if tags.get(u"boundary", u"") != u"administrative":
            return
        # Collect refs of all way members; duplicates shrink the set.
        w = [m[u"ref"] for m in members if m[u"type"] == u"way"]
        if len(w) != len(set(w)):
            return {"class": 504}
        # Commented-out legacy check (class 503), kept for reference:
        #if tags.get(u"admin_level", u"") != u"8":
        #    return
        #n_limit = 15
        #n = len(data[u"member"])
        #if n >= n_limit:
        #    return {"class": 503, "subclass": 0, "text": T_("More than {0} ways in admin_level=8 relation ({1})", str(n_limit), str(n))}
###########################################################################
from plugins.Plugin import TestPluginCommon
class Test(TestPluginCommon):
    """Unit tests for the Administrative_TooManyWays plugin."""

    def setUp(self):
        TestPluginCommon.setUp(self)
        # Plugin under test, initialised without a real logger.
        self.p = Administrative_TooManyWays(None)
        self.p.init(None)

    def test(self):
        def member(ref, role, kind):
            return {"ref": ref, "role": role, "type": kind}

        admin = {"boundary": "administrative"}
        way1_xx = member(1, "xx", "way")
        way1_yy = member(1, "yy", "way")
        way2 = member(2, "xx", "way")
        way2_dup = member(2, "xx", "way")
        node1 = member(1, "xx", "node")

        # The same way twice (whatever its role) must raise error 504.
        self.check_err(self.p.relation(None, admin, [way1_xx, way1_yy]))
        self.check_err(self.p.relation(None, admin, [way1_xx, way1_yy, way2, way2_dup, node1]))
        # Distinct ways, or a node sharing a way's ref, are fine.
        assert not self.p.relation(None, admin, [way1_xx, way2])
        assert not self.p.relation(None, admin, [way1_xx, node1])
        # Relations without the administrative boundary tag are ignored.
        assert not self.p.relation(None, {}, [way1_xx, way1_yy])
        assert not self.p.relation(None, {}, [way1_xx, way1_yy])
| gpl-3.0 |
kod3r/pyNES | pynes/compiler.py | 28 | 14864 | # -*- coding: utf-8 -*-
from re import match
import pynes
from pynes.analyzer import analyse
from pynes.c6502 import opcodes, address_mode_def
import io
import inspect
from binascii import hexlify
from pynes.directives import directive_list
from pynes.cartridge import Cartridge
# Lexer table for 6502 assembly: each entry is tried in order against the
# remaining input; entries with store=False (whitespace, comments) are
# consumed but not emitted.
# NOTE: get_value() indexes this list positionally (asm65_tokens[1]..[5]),
# so the ordering of the first entries must not change.
asm65_tokens = [
    dict(type='T_INSTRUCTION',
         regex=(r'^(ADC|AND|ASL|BCC|BCS|BEQ|BIT|BMI|BNE|BPL|BRK|BVC|BVS|CLC|'
                'CLD|CLI|CLV|CMP|CPX|CPY|DEC|DEX|DEY|EOR|INC|INX|INY|JMP|JSR|'
                'LDA|LDX|LDY|LSR|NOP|ORA|PHA|PHP|PLA|PLP|ROL|ROR|RTI|RTS|SBC|'
                'SEC|SED|SEI|STA|STX|STY|TAX|TAY|TSX|TXA|TXS|TYA)'),
         store=True),
    dict(type='T_ADDRESS', regex=r'\$([\dA-F]{2,4})', store=True),
    dict(type='T_HEX_NUMBER', regex=r'\#\$([\dA-F]{2})', store=True),
    dict(type='T_BINARY_NUMBER', regex=r'\#?%([01]{8})', store=True),
    dict(type='T_DECIMAL_NUMBER', regex=r'\#(\d{1,3})', store=True),
    dict(type='T_LABEL', regex=r'^([a-zA-Z]{2}[a-zA-Z\d]*)\:', store=True),
    dict(type='T_MARKER', regex=r'^[a-zA-Z]{2}[a-zA-Z\d]*', store=True),
    dict(type='T_STRING', regex=r'^"[^"]*"', store=True),
    dict(type='T_SEPARATOR', regex=r'^,', store=True),
    dict(type='T_ACCUMULATOR', regex=r'^(A|a)', store=True),
    dict(type='T_REGISTER', regex=r'^(X|x|Y|y)', store=True),
    dict(type='T_MODIFIER', regex=r'^(#LOW|#HIGH)', store=True),
    dict(type='T_OPEN', regex=r'^\(', store=True),
    dict(type='T_CLOSE', regex=r'^\)', store=True),
    dict(type='T_OPEN_SQUARE_BRACKETS', regex=r'^\[', store=True),
    dict(type='T_CLOSE_SQUARE_BRACKETS', regex=r'^\]', store=True),
    dict(type='T_DIRECTIVE', regex=r'^\.[a-z]+', store=True),
    dict(type='T_DECIMAL_ARGUMENT', regex=r'^[\d]+', store=True),
    dict(type='T_ENDLINE', regex=r'^\n', store=True),
    dict(type='T_WHITESPACE', regex=r'^[ \t\r]+', store=False),
    dict(type='T_COMMENT', regex=r'^;[^\n]*', store=False)
]
def look_ahead(tokens, index, type, value=None):
    """Return 1 when tokens[index] exists, has the given *type* and, when
    *value* is given, matches it case-insensitively; return 0 otherwise."""
    if index >= len(tokens):
        return 0
    candidate = tokens[index]
    if candidate['type'] != type:
        return 0
    if value is not None and candidate['value'].upper() != value.upper():
        return 0
    return 1
def t_endline(tokens, index):
    """Match an end-of-line token."""
    return look_ahead(tokens, index, 'T_ENDLINE', '\n')

def t_modifier(tokens, index):
    """Match a #LOW/#HIGH modifier token."""
    return look_ahead(tokens, index, 'T_MODIFIER')

def t_directive(tokens, index):
    """Match an assembler directive token (e.g. '.org')."""
    return look_ahead(tokens, index, 'T_DIRECTIVE')

def t_directive_argument(tokens, index):
    """Match any token form usable as a directive argument."""
    # NOTE(review): t_address is listed twice below — looks redundant;
    # confirm before removing.
    return OR([t_list, t_address, t_marker, t_address,
               t_decimal_argument, t_string], tokens, index)

def t_decimal_argument(tokens, index):
    """Match a bare decimal argument token (e.g. for .rs)."""
    return look_ahead(tokens, index, 'T_DECIMAL_ARGUMENT')
def t_relative(tokens, index):
    """Return 1 when tokens[index] is a branch mnemonic (relative
    addressing: BCC, BCS, BEQ, BNE, BMI, BPL, BVC, BVS), else 0."""
    if index >= len(tokens):
        return 0
    token = tokens[index]
    if token['type'] != 'T_INSTRUCTION':
        return 0
    branch_mnemonics = ('BCC', 'BCS', 'BEQ', 'BNE', 'BMI', 'BPL', 'BVC', 'BVS')
    return 1 if token['value'] in branch_mnemonics else 0
# One thin predicate per terminal of the grammar; each returns 1 on match,
# 0 otherwise, so results can be summed to compute statement sizes.
def t_instruction(tokens, index):
    return look_ahead(tokens, index, 'T_INSTRUCTION')

def t_zeropage(tokens, index):
    """Match an address short enough for zero page ('$' plus 2 hex digits,
    i.e. a 3-character value such as '$1F')."""
    lh = look_ahead(tokens, index, 'T_ADDRESS')
    if lh and len(tokens[index]['value']) == 3:
        return 1
    return 0

def t_label(tokens, index):
    return look_ahead(tokens, index, 'T_LABEL')

def t_marker(tokens, index):
    return look_ahead(tokens, index, 'T_MARKER')

def t_address(tokens, index):
    return look_ahead(tokens, index, 'T_ADDRESS')

def t_string(tokens, index):
    return look_ahead(tokens, index, 'T_STRING')

def t_address_or_t_marker(tokens, index):
    return OR([t_address, t_marker], tokens, index)

def t_address_or_t_binary_number(tokens, index):
    return OR([t_address, t_binary_number], tokens, index)

def t_hex_number(tokens, index):
    return look_ahead(tokens, index, 'T_HEX_NUMBER')

def t_binary_number(tokens, index):
    return look_ahead(tokens, index, 'T_BINARY_NUMBER')

def t_decimal_number(tokens, index):
    return look_ahead(tokens, index, 'T_DECIMAL_NUMBER')

def t_number(tokens, index):
    return OR([t_hex_number, t_binary_number, t_decimal_number], tokens, index)

def t_separator(tokens, index):
    return look_ahead(tokens, index, 'T_SEPARATOR')

def t_accumulator(tokens, index):
    return look_ahead(tokens, index, 'T_ACCUMULATOR', 'A')

def t_register_x(tokens, index):
    return look_ahead(tokens, index, 'T_REGISTER', 'X')

def t_register_y(tokens, index):
    return look_ahead(tokens, index, 'T_REGISTER', 'Y')

def t_open(tokens, index):
    return look_ahead(tokens, index, 'T_OPEN', '(')

def t_close(tokens, index):
    return look_ahead(tokens, index, 'T_CLOSE', ')')

def t_open_square_brackets(tokens, index):
    return look_ahead(tokens, index, 'T_OPEN_SQUARE_BRACKETS', '[')

def t_close_square_brackets(tokens, index):
    return look_ahead(tokens, index, 'T_CLOSE_SQUARE_BRACKETS', ']')

# NESASM accepts [ ] as well as ( ) around indirect operands.
def t_nesasm_compatible_open(tokens, index):
    return OR([t_open, t_open_square_brackets], tokens, index)

def t_nesasm_compatible_close(tokens, index):
    return OR([t_close, t_close_square_brackets], tokens, index)
def t_list(tokens, index):
    """Match a comma-separated list of addresses/binary numbers starting at
    *index* (e.g. after a .db directive) and return the number of tokens it
    spans; return 0 when no list starts here."""
    if (t_address_or_t_binary_number(tokens, index)
            and t_separator(tokens, index + 1)):
        islist = 1
        arg = 0  # number of ", value" pairs consumed after the first value
        while (islist):
            # Each iteration expects a separator then a value at the
            # positions index + 2*arg + 1 and index + 2*arg + 2.
            islist = islist and t_separator(tokens, index + (arg * 2) + 1)
            var_index = index + (arg * 2) + 2
            islist = islist and t_address_or_t_binary_number(tokens, var_index)
            # Stop at end of line or end of stream.
            if (t_endline(tokens, index + (arg * 2) + 3)
                    or (index + (arg * 2) + 3) == len(tokens)):
                break
            arg += 1
        if islist:
            return ((arg + 1) * 2) + 1
    return 0

def get_list_jump(tokens, index):
    """Count how many consecutive address/separator tokens follow *index*
    plus one (the loop increments once more after the first mismatch)."""
    keep = True
    a = 0
    while keep:
        keep = keep & (
            t_address(tokens, index + a) | t_separator(tokens, index + a))
        a += 1
    return a
def OR(args, tokens, index):
    """Return the result of the first predicate in *args* that matches the
    token stream at *index*, or 0 when none matches.

    :param args: predicate functions of signature ``f(tokens, index) -> int``
    :param tokens: token stream (list of token dicts)
    :param index: position in *tokens* to test
    :return: the truthy value returned by the first matching predicate, or 0
    """
    # Call each predicate only once: the previous implementation invoked a
    # matching predicate twice (once for the truth test, once for the
    # return value), doubling the work for no behavioural gain.
    for predicate in args:
        result = predicate(tokens, index)
        if result:
            return result
    return 0
# Grammar productions: each entry maps a statement type to the ordered
# sequence of token predicates that must all match.  Order matters —
# more specific addressing modes (e.g. zeropage,X) are tried before the
# generic ones, and S_IMPLIED (bare instruction) is the final fallback.
asm65_bnf = [
    dict(type='S_RS', bnf=[t_marker, t_directive, t_directive_argument]),
    dict(type='S_DIRECTIVE', bnf=[t_directive, t_directive_argument]),
    dict(type='S_RELATIVE', bnf=[t_relative, t_address_or_t_marker]),
    dict(type='S_IMMEDIATE', bnf=[t_instruction, t_number]),
    dict(type='S_IMMEDIATE_WITH_MODIFIER',
         bnf=[t_instruction, t_modifier, t_open, t_address_or_t_marker,
              t_close]),
    dict(type='S_ACCUMULATOR', bnf=[t_instruction, t_accumulator]),
    dict(type='S_ZEROPAGE_X',
         bnf=[t_instruction, t_zeropage, t_separator, t_register_x]),
    dict(type='S_ZEROPAGE_Y',
         bnf=[t_instruction, t_zeropage, t_separator, t_register_y]),
    dict(type='S_ZEROPAGE', bnf=[t_instruction, t_zeropage]),
    dict(type='S_ABSOLUTE_X',
         bnf=[t_instruction, t_address_or_t_marker, t_separator,
              t_register_x]),
    dict(type='S_ABSOLUTE_Y',
         bnf=[t_instruction, t_address_or_t_marker, t_separator,
              t_register_y]),
    dict(type='S_ABSOLUTE', bnf=[t_instruction, t_address_or_t_marker]),
    dict(type='S_INDIRECT_X',
         bnf=[t_instruction, t_nesasm_compatible_open, t_address_or_t_marker,
              t_separator, t_register_x, t_nesasm_compatible_close]),
    dict(type='S_INDIRECT_Y',
         bnf=[t_instruction, t_nesasm_compatible_open, t_address_or_t_marker,
              t_nesasm_compatible_close, t_separator, t_register_y]),
    dict(type='S_IMPLIED', bnf=[t_instruction]),
]
def lexical(code):
    """Tokenize *code* using the 6502 token table; yields token dicts."""
    return analyse(code, asm65_tokens)  # A generator
def get_value(token, labels=[]):
    """Convert a token to its concrete value: an int for numeric tokens,
    the label name for T_LABEL, the resolved address for a known T_MARKER,
    the bare text for T_STRING; raise for anything else."""
    # Numeric token types map to (index into asm65_tokens, numeric base);
    # the stored regex group 1 carries the digits.
    numeric_specs = {
        'T_ADDRESS': (1, 16),
        'T_HEX_NUMBER': (2, 16),
        'T_BINARY_NUMBER': (3, 2),
        'T_DECIMAL_NUMBER': (4, 10),
    }
    ttype = token['type']
    if ttype in numeric_specs:
        token_index, base = numeric_specs[ttype]
        digits = match(asm65_tokens[token_index]['regex'], token['value'])
        return int(digits.group(1), base)
    if ttype == 'T_LABEL':
        return match(asm65_tokens[5]['regex'], token['value']).group(1)
    if ttype == 'T_MARKER' and token['value'] in labels:
        return labels[token['value']]
    if ttype == 'T_DECIMAL_ARGUMENT':
        return int(token['value'])
    if ttype == 'T_STRING':
        # Strip the surrounding double quotes.
        return token['value'][1:-1]
    raise Exception('could not get value:' + token['type'] +
                    token['value'] + str(token['line']))
def syntax(tokens):
    """Parse the token stream into a flat AST: a list of leaves, each with a
    'type' (statement kind), its 'children' tokens and optionally the
    'labels' that precede it."""
    tokens = list(tokens)
    ast = []
    x = 0  # consumed
    # debug = 0
    labels = []   # labels seen since the last emitted statement
    move = 0
    while (x < len(tokens)):
        if t_label(tokens, x):
            # Remember the label; it is attached to the next statement.
            labels.append(get_value(tokens[x]))
            x += 1
        elif t_endline(tokens, x):
            x += 1
        else:
            # Try each grammar production in order; first full match wins.
            for bnf in asm65_bnf:
                leaf = {}
                look_ahead = 0
                move = 0
                for i in bnf['bnf']:
                    move = i(tokens, x + look_ahead)
                    if not move:
                        break
                    look_ahead += 1
                if move:
                    if len(labels) > 0:
                        leaf['labels'] = labels
                        labels = []
                    # Re-run the predicates to sum the token span of the
                    # statement (each predicate returns its width).
                    size = 0
                    look_ahead = 0
                    for b in bnf['bnf']:
                        size += b(tokens, x + look_ahead)
                        look_ahead += 1
                    leaf['children'] = tokens[x: x + size]
                    leaf['type'] = bnf['type']
                    ast.append(leaf)
                    x += size
                    break
            if not move:
                # TODO: deal with erros like on nodeNES
                # walk = 0
                print('------------')
                print(tokens[x])
                print(tokens[x + 1])
                print(tokens[x + 2])
                print(tokens[x + 3])
                raise(Exception('UNKNOW TOKEN'))
    return ast
def get_labels(ast):
    """First pass over the AST: compute the absolute address of every label
    and return them as a {label_name: address} dict."""
    labels = {}
    address = 0
    for leaf in ast:
        # .org resets the current assembly address.
        if ('S_DIRECTIVE' == leaf['type']
                and '.org' == leaf['children'][0]['value']):
            address = int(leaf['children'][1]['value'][1:], 16)
        if 'labels' in leaf:
            for label in leaf['labels']:
                labels[label] = address
        if leaf['type'] != 'S_DIRECTIVE' and leaf['type'] != 'S_RS':
            # Regular instruction: advance by its encoded size.
            size = address_mode_def[leaf['type']]['size']
            address += size
        elif ('S_DIRECTIVE' == leaf['type']
                and '.db' == leaf['children'][0]['value']):
            # .db emits one byte per address argument.
            for i in leaf['children']:
                if 'T_ADDRESS' == i['type']:
                    address += 1
        elif ('S_DIRECTIVE' == leaf['type']
                and '.incbin' == leaf['children'][0]['value']):
            address += 4 * 1024  # TODO check file size;
    return labels
def semantic(ast, iNES=False, cart=None):
    """Second pass: translate the AST into 6502 machine code appended to
    *cart*; return the raw code, or the full iNES image when *iNES* is
    True."""
    if cart is None:
        cart = Cartridge()
    labels = get_labels(ast)
    address = 0
    # translate statments to opcode
    for leaf in ast:
        if leaf['type'] == 'S_RS':
            # .rs reserves zero-page/RAM space: bind the marker to the
            # current reservation pointer and advance it.
            labels[leaf['children'][0]['value']] = cart.rs
            cart.rs += get_value(leaf['children'][2])
        elif leaf['type'] == 'S_DIRECTIVE':
            directive = leaf['children'][0]['value']
            if len(leaf['children']) == 2:
                argument = get_value(leaf['children'][1], labels)
            else:
                argument = leaf['children'][1:]
            if directive in directive_list:
                directive_list[directive](argument, cart)
            else:
                raise Exception('UNKNOW DIRECTIVE')
        else:
            if leaf['type'] in ['S_IMPLIED', 'S_ACCUMULATOR']:
                instruction = leaf['children'][0]['value']
                address = False
            elif leaf['type'] == 'S_RELATIVE':
                instruction = leaf['children'][0]['value']
                address = get_value(leaf['children'][1], labels)
            elif leaf['type'] == 'S_IMMEDIATE_WITH_MODIFIER':
                instruction = leaf['children'][0]['value']
                modifier = leaf['children'][1]['value']
                address = get_value(leaf['children'][3], labels)
                if modifier == '#LOW':
                    address = (address & 0x00ff)
                elif modifier == '#HIGH':
                    address = (address & 0xff00) >> 8
            # NOTE(review): 'S_RELATIVE' also appears in the list below but
            # can never be reached here — it is consumed by the elif above.
            elif leaf['type'] in ['S_RELATIVE', 'S_IMMEDIATE', 'S_ZEROPAGE',
                                  'S_ABSOLUTE', 'S_ZEROPAGE_X',
                                  'S_ZEROPAGE_Y', 'S_ABSOLUTE_X',
                                  'S_ABSOLUTE_Y']:
                instruction = leaf['children'][0]['value']
                address = get_value(leaf['children'][1], labels)
            elif leaf['type'] in ['S_INDIRECT_X', 'S_INDIRECT_Y']:
                instruction = leaf['children'][0]['value']
                address = get_value(leaf['children'][2], labels)
            address_mode = address_mode_def[leaf['type']]['short']
            opcode = opcodes[instruction][address_mode]
            if address_mode != 'sngl' and address_mode != 'acc':
                if 'rel' == address_mode:
                    # Encode the branch target as a signed 8-bit offset
                    # relative to the current program counter.
                    # NOTE(review): the 126 bias and the bit fiddling below
                    # look suspicious — verify against the 6502 relative
                    # addressing encoding before touching.
                    address = 126 + (address - cart.pc)
                    if address == 128:
                        address = 0
                    elif address < 128:
                        address = address | 0b10000000
                    elif address > 128:
                        address = address & 0b01111111
                if address_mode_def[leaf['type']]['size'] == 2:
                    cart.append_code([opcode, address])
                else:
                    # 3-byte instruction: little-endian 16-bit operand.
                    arg1 = (address & 0x00ff)
                    arg2 = (address & 0xff00) >> 8
                    cart.append_code([opcode, arg1, arg2])
            else:
                cart.append_code([opcode])
    # nes_code = []
    if iNES:
        return cart.get_ines_code()
    else:
        return cart.get_code()
def compile_file(asmfile, output=None, path=None):
    """Assemble the file at *asmfile* and write the resulting binary.

    *output* defaults to 'output.nes'; *path* (used to resolve .incbin and
    friends) defaults to the directory containing *asmfile*."""
    from os.path import dirname, realpath

    if output is None:
        output = 'output.nes'
    if path is None:
        path = dirname(realpath(asmfile)) + '/'
    with io.open(asmfile, "r", encoding="utf-8") as source:
        binary = compile(source, path)
    pynes.write_bin_code(binary, output)
def compile(code, path):
    """Run the full pipeline (lexer -> parser -> code generation) over
    *code* and return the iNES opcodes.

    Note: this module-level name shadows the builtin ``compile``; keep the
    name for API compatibility."""
    cartridge = Cartridge()
    cartridge.path = path
    ast = syntax(lexical(code))
    return semantic(ast, True, cartridge)
| bsd-3-clause |
kalliope-project/kalliope | kalliope/core/Models/Neuron.py | 1 | 1497 | class Neuron(object):
"""
This Class is representing a Neuron which is corresponding to an action to perform.
.. note:: Neurons are defined in the brain file
"""
def __init__(self, name=None, parameters=dict()):
self.name = name
self.parameters = parameters
def serialize(self):
"""
This method allows to serialize in a proper way this object
:return: A dict of name and parameters
:rtype: Dict
"""
return {
'name': self.name,
'parameters': self.parameters
}
def __str__(self):
"""
Return a string that describe the neuron. If a parameter contains the word "password",
the output of this parameter will be masked in order to not appears in clean in the console
:return: string description of the neuron
"""
returned_dict = {
'name': self.name,
'parameters': self.parameters
}
cleaned_parameters = dict()
for key, value in self.parameters.items():
if "password" in key:
cleaned_parameters[key] = "*****"
else:
cleaned_parameters[key] = value
returned_dict["parameters"] = cleaned_parameters
return str(returned_dict)
def __eq__(self, other):
"""
This is used to compare 2 objects
:param other:
:return:
"""
return self.__dict__ == other.__dict__
| gpl-3.0 |
FarhanHaque/bitcoin | qa/rpc-tests/signrawtransactions.py | 171 | 4631 | #!/usr/bin/env python2
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(BitcoinTestFramework):
    """Tests transaction signing via RPC command "signrawtransaction"."""

    def setup_chain(self):
        # Single-node chain, started from a clean data directory.
        print('Initializing test directory ' + self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)

    def setup_network(self, split=False):
        self.nodes = start_nodes(1, self.options.tmpdir)
        self.is_network_split = False

    def successful_signing_test(self):
        """Creates and signs a valid raw transaction with one input.

        Expected results:

        1) The transaction has a complete set of signatures
        2) No script verification error occurred"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']

        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'}
        ]

        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)

        # 1) The transaction has a complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], True)

        # 2) No script verification error occurred
        assert 'errors' not in rawTxSigned

    def script_verification_error_test(self):
        """Creates and signs a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.

        Expected results:

        3) The transaction has no complete set of signatures
        4) Two script verification errors occurred
        5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
        6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
        privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']

        inputs = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
            # Missing scriptPubKey
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
        ]

        scripts = [
            # Valid pay-to-pubkey script
            {'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
             'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
            # Invalid script
            {'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
             'scriptPubKey': 'badbadbadbad'}
        ]

        outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}

        rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
        rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)

        # 3) The transaction has no complete set of signatures
        assert 'complete' in rawTxSigned
        assert_equal(rawTxSigned['complete'], False)

        # 4) Two script verification errors occurred
        assert 'errors' in rawTxSigned
        assert_equal(len(rawTxSigned['errors']), 2)

        # 5) Script verification errors have certain properties
        assert 'txid' in rawTxSigned['errors'][0]
        assert 'vout' in rawTxSigned['errors'][0]
        assert 'scriptSig' in rawTxSigned['errors'][0]
        assert 'sequence' in rawTxSigned['errors'][0]
        assert 'error' in rawTxSigned['errors'][0]

        # 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
        assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
        assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
        assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
        assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])

    def run_test(self):
        self.successful_signing_test()
        self.script_verification_error_test()


if __name__ == '__main__':
    SignRawTransactionsTest().main()
| mit |
ClearCorp-dev/account-consolidation | __unported__/account_consolidation/wizard/consolidation_consolidate.py | 3 | 21384 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2011-2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
from openerp.osv.osv import except_osv
from openerp.tools.translate import _
class account_consolidation_consolidate(orm.TransientModel):
_name = 'account.consolidation.consolidate'
_inherit = 'account.consolidation.base'
def _default_journal(self, cr, uid, context=None):
comp_obj = self.pool['res.company']
journ_obj = self.pool['account.journal']
comp_id = comp_obj._company_default_get(cr, uid)
journal_id = journ_obj.search(cr, uid, [('company_id', '=', comp_id)], limit=1)
if journal_id:
return journal_id[0]
return False
    # Wizard fields: the period range to consolidate, the journal that
    # receives the consolidation entries, and the subsidiaries to include.
    _columns = {
        'from_period_id': fields.many2one(
            'account.period',
            'Start Period',
            required=True,
            help="Select the same period in 'from' and 'to' "
                 "if you want to proceed with a single period. "
                 "Start Period is ignored for Year To Date accounts."),
        'to_period_id': fields.many2one(
            'account.period',
            'End Period',
            required=True,
            help="The consolidation will be done at the very "
                 "last date of the selected period."),
        'journal_id': fields.many2one(
            'account.journal', 'Journal', required=True),
        'target_move': fields.selection(
            [('posted', 'All Posted Entries'),
             ('all', 'All Entries')],
            'Target Moves',
            required=True),
        'subsidiary_ids': fields.many2many(
            'res.company',
            'account_conso_conso_comp_rel',
            'conso_id',
            'company_id',
            string='Subsidiaries',
            required=True),
    }

    _defaults = {'target_move': 'posted',
                 'journal_id': _default_journal,
                 }

    def _check_periods_fy(self, cr, uid, ids, context=None):
        """Constraint helper: both selected periods must belong to the
        same fiscal year."""
        # `long` is Python 2 only (this module targets OpenERP 7 / Py2).
        if isinstance(ids, (int, long)):
            ids = [ids]
        assert len(ids) == 1, "only 1 id expected"
        form = self.browse(cr, uid, ids[0], context=context)
        return (form.from_period_id.fiscalyear_id.id ==
                form.to_period_id.fiscalyear_id.id)

    _constraints = [
        (_check_periods_fy,
         'Start Period and End Period must be of the same Fiscal Year !',
         ['from_period_id', 'to_period_id']),
    ]
def on_change_from_period_id(self, cr, uid, ids, from_period_id,
to_period_id, context=None):
""" On change of the From period, set the To period
to the same period if it is empty
:param from_period_id: ID of the selected from period id
:param to_period_id: ID of the current from period id
:return: dict of values to change
"""
result = {}
period_obj = self.pool.get('account.period')
from_period = period_obj.browse(cr, uid, from_period_id,
context=context)
if not to_period_id:
result['to_period_id'] = from_period_id
else:
to_period = period_obj.browse(cr, uid, to_period_id,
context=context)
if to_period.date_start < from_period.date_start:
result['to_period_id'] = from_period_id
result['fiscalyear_id'] = from_period.fiscalyear_id.id
return {'value': result}
def _currency_rate_type(self, cr, uid, ids, account, context=None):
"""
Returns the currency rate type to use
:param account: browse_record instance of account.account
:return: id of the currency rate type to use
"""
if account.consolidation_rate_type_id:
return account.consolidation_rate_type_id.id
elif account.user_type.consolidation_rate_type_id:
return account.user_type.consolidation_rate_type_id.id
else:
return False
def _consolidation_mode(self, cr, uid, ids, account, context=None):
"""
Returns the consolidation mode to use
:param account: browse instance of account.account
:return: 'ytd' or 'period'
"""
return (account.consolidation_mode or
account.user_type.consolidation_mode)
    def _periods_holding_to_subsidiary(self, cr, uid, ids, period_ids,
                                       subsidiary_id, context=None):
        """
        Returns the periods of a subsidiary company which
        correspond to the holding periods (same beginning and ending dates)

        :param period_ids: list of periods of the holding
        :param subsidiary_id: ID of the subsidiary for which
                              we want the period IDs
        :return: list of periods of the subsidiaries
        """
        period_obj = self.pool.get('account.period')
        if isinstance(period_ids, (int, long)):
            period_ids = [period_ids]
        subs_period_ids = []
        for period in period_obj.browse(cr, uid, period_ids, context=context):
            # Match on identical start/stop dates within the subsidiary.
            subs_period_ids += period_obj.search(
                cr, uid,
                [('date_start', '=', period.date_start),
                 ('date_stop', '=', period.date_stop),
                 ('company_id', '=', subsidiary_id)],
                context=context)
        return subs_period_ids
    def create_rate_difference_line(self, cr, uid, ids, move_id, consolidation_mode, context):
        """Balance a consolidation move by posting the residual amount on the
        company's "consolidation difference" account.

        A consolidation difference can appear when an account is consolidated
        in YTD mode while its normal counterpart uses a different setting.

        :param move_id: ID of the consolidation account.move to balance
        :param consolidation_mode: 'ytd' or 'period', used in the line label
        :return: ID of the created difference move line, or False when the
                 move is empty or already balanced
        :raise except_osv: when the company has no consolidation difference
                           account configured
        """
        move_obj = self.pool['account.move']
        move_line_obj = self.pool['account.move.line']
        currency_obj = self.pool['res.currency']
        move = move_obj.browse(cr, uid, move_id, context=context)
        if not move.line_id:
            return False
        diff_account = move.company_id.consolidation_diff_account_id
        if not diff_account:
            raise except_osv(_('Settings ERROR'),
                             _('Please set the "Consolidation difference account"'
                               ' in company %s') % move.company_id.name)
        debit = credit = 0.0
        for line in move.line_id:
            debit += line.debit
            credit += line.credit
        balance = debit - credit
        # We do not want to create counter parts for amount smaller than
        # "holding" company currency rounding policy.
        # As generated lines are in draft state, accountant will be able to manage
        # special cases
        move_is_balanced = currency_obj.is_zero(cr, uid, move.company_id.currency_id, balance)
        if not move_is_balanced:
            # Post the residual on the opposite side to balance the move.
            diff_vals = {'account_id': diff_account.id,
                         'move_id': move.id,
                         'journal_id': move.journal_id.id,
                         'period_id': move.period_id.id,
                         'company_id': move.company_id.id,
                         'date': move.date,
                         'debit': abs(balance) if balance < 0.0 else 0.0,
                         'credit': balance if balance > 0.0 else 0.0,
                         'name': _('Consolidation difference in mode %s') % consolidation_mode
                         }
            return move_line_obj.create(cr, uid, diff_vals, context=context)
        return False
    def consolidate_account(self, cr, uid, ids, consolidation_mode,
                            subsidiary_period_ids, state, move_id,
                            holding_account_id, subsidiary_id, context=None):
        """
        Consolidates the subsidiary account on the holding account
        Creates move lines on the move with id "move_id"

        :param consolidation_mode: consolidate by Periods or
                                   Year To Date ('period' or 'ytd')
        :param subsidiary_period_ids: IDs of periods for which we
                                      want to sum the debit/credit
        :param state: state of the moves to consolidate ('all' or 'posted')
        :param move_id: ID of the move on which all the
                        created move lines will be linked
        :param holding_account_id: ID of the account to consolidate
                                   (on the holding), the method will
                                   find the subsidiary's corresponding account
        :param subsidiary_id: ID of the subsidiary to consolidate
        :return: ID of the created move line, [] when the subsidiary has no
                 matching account, False when the balance is zero
        """
        # NOTE(review): the return type is inconsistent (id / [] / False);
        # check how callers use the result before changing it.
        if context is None:
            context = {}
        account_obj = self.pool.get('account.account')
        move_obj = self.pool.get('account.move')
        move_line_obj = self.pool.get('account.move.line')
        currency_obj = self.pool.get('res.currency')

        move = move_obj.browse(cr, uid, move_id, context=context)

        holding_account = account_obj.browse(cr, uid, holding_account_id,
                                             context=context)
        # Accounts are matched between companies by their code.
        subsidiary_account_id = account_obj.search(cr, uid,
                                                   [('code', '=', holding_account.code),
                                                    ('company_id', '=', subsidiary_id)],
                                                   context=context)
        if not subsidiary_account_id:
            # an account may exist on the holding and not in the subsidiaries,
            # nothing to do
            return []

        # The browse context drives the balance computation (period range
        # and move state filtering).
        browse_ctx = dict(context, state=state, periods=subsidiary_period_ids)
        # 1st item because the account's code is unique per company
        subs_account = account_obj.browse(cr, uid, subsidiary_account_id[0],
                                          context=browse_ctx)
        vals = {
            'name': _("Consolidation line in %s mode") % consolidation_mode,
            'account_id': holding_account.id,
            'move_id': move.id,
            'journal_id': move.journal_id.id,
            'period_id': move.period_id.id,
            'company_id': move.company_id.id,
            'date': move.date
        }
        balance = subs_account.balance
        if not balance:
            return False
        if (holding_account.company_currency_id.id ==
                subs_account.company_currency_id.id):
            # Same currency: book the balance directly.
            vals.update({
                'debit': balance if balance > 0.0 else 0.0,
                'credit': abs(balance) if balance < 0.0 else 0.0,
            })
        else:
            # Different currency: convert with the account's rate type and
            # keep the original amount in amount_currency.
            currency_rate_type = self._currency_rate_type(cr, uid, ids,
                                                          holding_account, context=context)
            currency_value = currency_obj.compute(cr, uid,
                                                  holding_account.company_currency_id.id,
                                                  subs_account.company_currency_id.id,
                                                  balance,
                                                  currency_rate_type_from=False,  # means spot
                                                  currency_rate_type_to=currency_rate_type,
                                                  context=context)
            vals.update({
                'currency_id': subs_account.company_currency_id.id,
                'amount_currency': subs_account.balance,
                'debit': currency_value if currency_value > 0.0 else 0.0,
                'credit': abs(currency_value) if currency_value < 0.0 else 0.0,
            })
        return move_line_obj.create(cr, uid, vals, context=context)
def reverse_moves(self, cr, uid, ids, subsidiary_id, journal_id,
reversal_date, context=None):
"""
Reverse all account moves of a journal which have
the "To be reversed" flag
:param subsidiary_id: ID of the subsidiary moves to reverse
:param journal_id: ID of the journal with moves to reverse
:param reversal_date: date when to create the reversal
:return: tuple with : list of IDs of the reversed moves,
list of IDs of the reversal moves
"""
move_obj = self.pool.get('account.move')
reversed_ids = move_obj.search(cr, uid,
[('journal_id', '=', journal_id),
('to_be_reversed', '=', True),
('consol_company_id', '=', subsidiary_id)],
context=context)
reversal_ids = move_obj.create_reversals(
cr, uid, reversed_ids, reversal_date, context=context)
return reversed_ids, reversal_ids
def consolidate_subsidiary(self, cr, uid, ids,
                           subsidiary_id, context=None):
    """
    Consolidate one subsidiary on the Holding.

    Create a move per subsidiary and consolidation type (YTD/Period)
    and a move line per account of the subsidiary.

    :param subsidiary_id: ID of the subsidiary to consolidate
                          on the holding
    :return: Tuple of form:
             (list of IDs of the YTD moves,
              list of IDs of the Period moves)
    """
    if context is None:
        context = {}
    if isinstance(ids, (int, long)):
        ids = [ids]
    assert len(ids) == 1, "only 1 id expected"
    company_obj = self.pool.get('res.company')
    move_obj = self.pool.get('account.move')
    period_obj = self.pool.get('account.period')
    form = self.browse(cr, uid, ids[0], context=context)
    # NOTE(review): context=None here (instead of the caller's context) is
    # inconsistent with the rest of the method -- confirm it is intentional.
    subsidiary = company_obj.browse(cr, uid, subsidiary_id, context=None)

    data_ctx = dict(context, holding_coa=True)
    holding_accounts_data = self._chart_accounts_data(
        cr, uid,
        ids,
        form.holding_chart_account_id.id,
        context=data_ctx)

    subs_accounts_codes = self._chart_accounts_data(
        cr, uid,
        ids,
        subsidiary.consolidation_chart_account_id.id,
        context=context)

    # only consolidate accounts that exist on both charts (matched by code)
    holding_accounts = [values for key, values
                        in holding_accounts_data.iteritems()
                        if key in subs_accounts_codes]

    # split accounts in ytd and periods modes
    # a move per type will be created
    consolidation_modes = {'ytd': [], 'period': []}
    for account in holding_accounts:
        cm = self._consolidation_mode(
            cr, uid, ids, account, context=context)
        consolidation_modes[cm].append(account)

    period_ids = period_obj.build_ctx_periods(
        cr, uid,
        form.from_period_id.id,
        form.to_period_id.id)

    generic_move_vals = {
        'journal_id': form.journal_id.id,
        'company_id': form.company_id.id,
        'consol_company_id': subsidiary.id,
    }

    # Collect the created move ids per consolidation mode.  The original
    # code appended through locals()[mode + '_move_ids'], which relies on
    # a CPython implementation detail (locals() exposing live references)
    # and is opaque; an explicit mapping is both safe and readable.
    created_move_ids = {'ytd': [], 'period': []}
    for consolidation_mode, accounts in consolidation_modes.iteritems():
        if not accounts:
            continue

        # get list of periods for which we have to create a move
        # in period mode : a move per period
        # in ytd mode : a move at the last period
        # (which will contains lines from 1st january to last period)
        move_period_ids = (period_ids
                           if consolidation_mode == 'period'
                           else [form.to_period_id.id])
        for move_period_id in move_period_ids:
            period = period_obj.browse(
                cr, uid, move_period_id, context=context)

            # in ytd we compute the amount from the first
            # day of the fiscal year
            # in period, only for the period
            if consolidation_mode == 'ytd':
                date_from = period.fiscalyear_id.date_start
            else:
                date_from = period.date_start
            date_to = period.date_stop

            period_ctx = dict(context, company_id=subsidiary.id)
            compute_from_period_id = period_obj.find(
                cr, uid, date_from, context=period_ctx)[0]
            compute_to_period_id = period_obj.find(
                cr, uid, date_to, context=period_ctx)[0]
            compute_period_ids = period_obj.build_ctx_periods(
                cr, uid,
                compute_from_period_id,
                compute_to_period_id)

            # reverse previous entries with flag 'to_be_reversed' (YTD)
            self.reverse_moves(
                cr, uid,
                ids,
                subsidiary.id,
                form.journal_id.id,
                date_to,
                context=context)

            # create the account move
            # at the very last date of the end period
            move_vals = dict(
                generic_move_vals,
                ref=_("Consolidation %s") % consolidation_mode,
                period_id=period.id,
                date=period.date_stop)
            move_id = move_obj.create(cr, uid, move_vals, context=context)
            # create a move line per account
            has_move_line = False
            for account in accounts:
                m_id = self.consolidate_account(
                    cr, uid, ids,
                    consolidation_mode,
                    compute_period_ids,
                    form.target_move,
                    move_id,
                    account.id,
                    subsidiary.id,
                    context=context)
                if m_id:
                    has_move_line = True
            if has_move_line:
                self.create_rate_difference_line(cr, uid, ids,
                    move_id, consolidation_mode, context=context)
                created_move_ids[consolidation_mode].append(move_id)
            else:
                # We delete created move if it has no line.
                # As move are generated in draft mode they will be no gap in
                # number if consolidation journal has correct settings.
                # I agree it can be more efficient but size of refactoring
                # is not in ressource scope
                move_obj.unlink(cr, uid, [move_id])

    return created_move_ids['ytd'], created_move_ids['period']
def run_consolidation(self, cr, uid, ids, context=None):
    """
    Consolidate all selected subsidiaries Virtual Chart of Accounts
    on the Holding Chart of Account.

    :return: dict to open an Entries view filtered on the created moves
    """
    super(account_consolidation_consolidate, self).run_consolidation(
        cr, uid, ids, context=context)

    mod_obj = self.pool.get('ir.model.data')
    act_obj = self.pool.get('ir.actions.act_window')
    move_obj = self.pool.get('account.move')
    form = self.browse(cr, uid, ids[0], context=context)
    move_ids = []
    ytd_move_ids = []
    for subsidiary in form.subsidiary_ids:
        new_move_ids = self.consolidate_subsidiary(
            cr, uid, ids, subsidiary.id, context=context)
        ytd_move_ids += new_move_ids[0]
        move_ids += sum(new_move_ids, [])

    # YTD moves have to be reversed on the next consolidation
    move_obj.write(
        cr, uid,
        ytd_move_ids,
        {'to_be_reversed': True},
        context=context)

    # Work on a copy of the context: the previous code called
    # context.update(...), which crashed with AttributeError when the
    # default context=None was passed and silently mutated the caller's
    # dict otherwise.
    context = dict(context or {}, move_ids=move_ids)
    __, action_id = mod_obj.get_object_reference(
        cr, uid, 'account', 'action_move_journal_line')
    action = act_obj.read(cr, uid, [action_id], context=context)[0]
    action['domain'] = unicode([('id', 'in', move_ids)])
    action['name'] = _('Consolidated Entries')
    action['context'] = unicode({'search_default_to_be_reversed': 0})
    return action
| agpl-3.0 |
neuroidss/nupic.research | projects/capybara/supervised_baseline/v1_no_sequences/plot_results.py | 9 | 3714 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import argparse
import os
import pandas as pd
from sklearn.metrics import (classification_report, confusion_matrix,
accuracy_score)
from baseline_utils import predictions_vote
from plot_utils import (plot_confusion_matrix, plot_train_history,
plot_classification_report, plot_predictions)
# Entry point: load the training history and test-set predictions produced
# by the supervised baseline, then render an accuracy/loss curve, a
# confusion matrix, a classification report and the prediction timeline.
if __name__ == '__main__':
  # Command-line options: where to read the CSV results, where to write the
  # plots, and the size of the majority-vote smoothing window.
  parser = argparse.ArgumentParser()
  parser.add_argument('--vote_window', '-v', dest='vote_window',
                      type=int, default=11)
  parser.add_argument('--input_dir', '-i', dest='input_dir',
                      type=str, default='results')
  parser.add_argument('--output_dir', '-o', dest='output_dir',type=str,
                      default='plots')

  options = parser.parse_args()
  vote_window = options.vote_window
  input_dir = options.input_dir
  output_dir = options.output_dir

  if not os.path.exists(output_dir):
    os.makedirs(output_dir)

  # Path to CSV files (training history and predictions)
  train_history_path = os.path.join(input_dir, 'train_history.csv')
  predictions_path = os.path.join(input_dir, 'predictions.csv')

  # Training history: per-epoch accuracy and loss curves.
  df = pd.read_csv(train_history_path)
  epochs = range(len(df.epoch.values))
  acc = df.acc.values
  loss = df.loss.values
  output_file = os.path.join(output_dir, 'train_history.html')
  plot_train_history(epochs, acc, loss, output_file)
  print 'Plot saved:', output_file

  # Predictions on the test set.
  df = pd.read_csv(predictions_path)
  t = df.t.values
  X_values = df.scalar_value.values
  y_true = df.y_true.values
  y_pred = df.y_pred.values
  # Smooth predicted labels with a sliding majority vote
  # (vote_window <= 0 disables smoothing).
  if vote_window > 0:
    y_pred = predictions_vote(y_pred, vote_window)

  # Accuracy
  acc = accuracy_score(y_true, y_pred)
  print 'Accuracy on test set:', acc

  label_list = sorted(df.y_true.unique())
  # Plot normalized confusion matrix
  cnf_matrix = confusion_matrix(y_true, y_pred)
  output_file = os.path.join(output_dir, 'confusion_matrix.png')
  _ = plot_confusion_matrix(cnf_matrix,
                            output_file,
                            classes=label_list,
                            normalize=True,
                            title='Confusion matrix (accuracy=%.2f)' % acc)
  print 'Plot saved:', output_file

  # Classification report (F1 score, etc.)
  clf_report = classification_report(y_true, y_pred)
  output_file = os.path.join(output_dir, 'classification_report.png')
  plot_classification_report(clf_report, output_file)
  print 'Plot saved:', output_file

  # Plot predictions
  output_file = os.path.join(output_dir, 'predictions.html')
  title = 'Predictions (accuracy=%s)' % acc
  plot_predictions(t, X_values, y_true, y_pred, output_file, title)
  print 'Plot saved:', output_file
gotomypc/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
    """Yield one l1_min_c check per loss/data/labels/intercept combination.

    Nose generator test: each yielded callable runs check_l1_min_c with a
    specific combination of parameters.
    """
    losses = ['squared_hinge', 'log']
    Xs = {'sparse': sparse_X, 'dense': dense_X}
    Ys = {'two-classes': Y1, 'multi-class': Y2}
    intercepts = {'no-intercept': {'fit_intercept': False},
                  'fit-intercept': {'fit_intercept': True,
                                    'intercept_scaling': 10}}

    for loss in losses:
        for X_label, X in Xs.items():
            for Y_label, Y in Ys.items():
                for intercept_label, intercept_params in intercepts.items():
                    # Bind the loop variables as default arguments: a plain
                    # closure is late-bound, so every yielded check would
                    # see the values of the *last* iteration if the test
                    # runner collected the checks before executing them.
                    check = (lambda X=X, Y=Y, loss=loss,
                             intercept_params=intercept_params:
                             check_l1_min_c(X, Y, loss, **intercept_params))
                    check.description = ('Test l1_min_c loss=%r %s %s %s' %
                                         (loss, X_label, Y_label,
                                          intercept_label))
                    yield check
def test_l2_deprecation():
    """loss='l2' must behave like 'squared_hinge' and raise a deprecation."""
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as caught:
        assert_equal(l1_min_c(dense_X, Y1, "l2"),
                     l1_min_c(dense_X, Y1, "squared_hinge"))
        assert_equal(caught[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
    """Check that l1_min_c is the exact boundary: at C == min_c the
    l1-penalized model is entirely zero, just above it at least one
    parameter becomes non-zero."""
    min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)

    estimators = {
        'log': LogisticRegression(penalty='l1'),
        'squared_hinge': LinearSVC(loss='squared_hinge',
                                   penalty='l1', dual=False),
    }
    clf = estimators[loss]
    clf.fit_intercept = fit_intercept
    clf.intercept_scaling = intercept_scaling

    # At exactly min_c, all coefficients and intercepts must vanish.
    clf.C = min_c
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) == 0).all())
    assert_true((np.asarray(clf.intercept_) == 0).all())

    # Slightly above min_c, the model must pick up at least one parameter.
    clf.C = min_c * 1.01
    clf.fit(X, y)
    assert_true((np.asarray(clf.coef_) != 0).any() or
                (np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
    """An all-zero design matrix makes the problem ill-posed: ValueError."""
    degenerate_X = [[0, 0], [0, 0]]
    labels = [0, 1]
    l1_min_c(degenerate_X, labels)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
    """'l1' is not a valid loss argument for l1_min_c: ValueError."""
    l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
do4best/html5-boilerplate_v5.2.0 | node_modules/node-gyp/gyp/pylib/gyp/generator/eclipse.py | 1825 | 17014 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""GYP backend that generates Eclipse CDT settings files.
This backend DOES NOT generate Eclipse CDT projects. Instead, it generates XML
files that can be imported into an Eclipse CDT project. The XML file contains a
list of include paths and symbols (i.e. defines).
Because a full .cproject definition is not created by this generator, it's not
possible to properly define the include dirs and symbols for each file
individually. Instead, one set of includes/symbols is generated for the entire
project. This works fairly well (and is a vast improvement in general), but may
still result in a few indexer issues here and there.
This generator has no automated tests, so expect it to be broken.
"""
from xml.sax.saxutils import escape
import os.path
import subprocess
import gyp
import gyp.common
import gyp.msvs_emulation
import shlex
import xml.etree.cElementTree as ET
generator_wants_static_library_dependencies_adjusted = False

# Some gyp steps fail if these are empty(!), so each directory variable is
# mapped to a same-named placeholder ('$INTERMEDIATE_DIR', ...).
generator_default_variables = dict(
    (dirname, '$' + dirname)
    for dirname in ('INTERMEDIATE_DIR', 'PRODUCT_DIR',
                    'LIB_DIR', 'SHARED_LIB_DIR'))

# These keys are meaningless for this generator; define them as empty strings
# so variable expansion still succeeds.
for unused in ('RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME'):
  generator_default_variables[unused] = ''

# Include dirs will occasionally use the SHARED_INTERMEDIATE_DIR variable as
# part of the path when dealing with generated headers. This value will be
# replaced dynamically for each configuration.
generator_default_variables['SHARED_INTERMEDIATE_DIR'] = \
    '$SHARED_INTERMEDIATE_DIR'
def CalculateVariables(default_variables, params):
  """Seed *default_variables* for the Eclipse generator.

  Folds the generator flags into default_variables (without overriding
  entries that are already set), records the OS flavor, and on Windows
  pulls in the configuration shared with the MSVS emulation layer.
  """
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  flavor = gyp.common.GetFlavor(params)
  default_variables.setdefault('OS', flavor)
  if flavor == 'win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Eclipse generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): the next two assignments only create function-local
    # names that are never used or exported -- presumably they were meant
    # to set module-level globals (as other generators do); confirm.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])

    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('adjust_static_libraries', False):
    # Flip the module-level switch consumed by gyp's input processing.
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """
  gyp_includes_set = set()
  compiler_includes_list = []

  # Find compiler's default include dirs.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    #   ...
    #   #include "..." search starts here:
    #   #include <...> search starts here:
    #    /usr/include/c++/4.6
    #    /usr/local/include
    #   End of search list.
    #   ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this,
      # and remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target,
                                                        generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      # ('key in dict' replaces the Python-2-only dict.has_key(), which
      # raises AttributeError on Python 3; behavior is identical on both.)
      if 'include_dirs' in config:
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              base_dir = os.path.dirname(target_name)

              include_dir = base_dir + '/' + include_dir
            include_dir = os.path.abspath(include_dir)
            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  for compiler_include in compiler_includes_list:
    if not compiler_include in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
def GetCompilerPath(target_list, data, options):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that.  Otherwise, see if a compiler was specified via
    the CC_target environment variable.
  """
  # First choice: make's global settings from the gyp project itself.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_settings = data[build_file].get('make_global_settings', {})
  for key, value in make_settings:
    if key in ('CC', 'CXX'):
      return os.path.join(options.toplevel_dir, value)

  # Second choice: an environment variable.
  for env_var in ('CC_target', 'CC', 'CXX'):
    compiler = os.environ.get(env_var)
    if compiler:
      return compiler

  # Last resort: assume plain gcc is on the PATH.
  return 'gcc'
def GetAllDefines(target_list, target_dicts, data, config_name, params,
                  compiler_path):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explict defines declared in gyp files along with all
    of the default defines that the compiler uses.
  """
  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]

    if flavor == 'win':
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      # Defines come as 'NAME=value' or bare 'NAME' (implicitly '1');
      # the first definition of a name wins.
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()

  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines  # Default defines already processed in the loop above.
  if compiler_path:
    # Ask the preprocessor to dump its built-in macros: '-dM' prints one
    # '#define NAME value' line per macro; fold them into the result.
    command = shlex.split(compiler_path)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cpp_output = cpp_proc.communicate()[0]
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      # Each line is '#define NAME value...' -> split off directive, name,
      # and (possibly space-containing) value.
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val

  return all_defines
def WriteIncludePaths(out, eclipse_langs, include_dirs):
  """Write the includes section of a CDT settings export file."""
  # Build the whole XML fragment first, then emit it in a single write.
  lines = [' <section name="org.eclipse.cdt.internal.ui.wizards.'
           'settingswizards.IncludePaths">\n',
           ' <language name="holder for library settings"></language>\n']
  for lang in eclipse_langs:
    lines.append(' <language name="%s">\n' % lang)
    for include_dir in include_dirs:
      lines.append(' <includepath workspace_path="false">%s</includepath>\n' %
                   include_dir)
    lines.append(' </language>\n')
  lines.append(' </section>\n')
  out.write(''.join(lines))
def WriteMacros(out, eclipse_langs, defines):
  """Write the macros section of a CDT settings export file.

  Args:
    out: writable file-like object that receives the XML fragment.
    eclipse_langs: Eclipse language names to emit the macros for.
    defines: dict mapping macro name -> value; names and values are
        XML-escaped and emitted in sorted-by-name order so output is stable.
  """
  out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \
      'settingswizards.Macros">\n')
  out.write(' <language name="holder for library settings"></language>\n')
  for lang in eclipse_langs:
    out.write(' <language name="%s">\n' % lang)
    # sorted(dict) iterates keys on Python 2 and 3 alike; the previous
    # dict.iterkeys() call is Python-2-only and breaks under Python 3.
    for key in sorted(defines):
      out.write(' <macro><name>%s</name><value>%s</value></macro>\n' %
                (escape(key), escape(defines[key])))
    out.write(' </language>\n')
  out.write(' </section>\n')
def GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name):
  """Emit the CDT settings file and the Java classpath file for one config."""
  options = params['options']
  generator_flags = params.get('generator_flags', {})

  # build_dir: relative path from source root to our output files.
  # e.g. "out/Debug"
  build_dir = os.path.join(generator_flags.get('output_dir', 'out'),
                           config_name)
  toplevel_build = os.path.join(options.toplevel_dir, build_dir)
  # Ninja uses out/Debug/gen while make uses out/Debug/obj/gen as the
  # SHARED_INTERMEDIATE_DIR. Include both possible locations.
  shared_intermediate_dirs = [os.path.join(toplevel_build, 'obj', 'gen'),
                              os.path.join(toplevel_build, 'gen')]

  settings_name = os.path.join(toplevel_build, 'eclipse-cdt-settings.xml')
  GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                          config_name, settings_name, options,
                          shared_intermediate_dirs)

  classpath_name = os.path.join(toplevel_build, 'eclipse-classpath.xml')
  GenerateClasspathFile(target_list, target_dicts, options.toplevel_dir,
                        toplevel_build, classpath_name)
def GenerateCdtSettingsFile(target_list, target_dicts, data, params,
                            config_name, out_name, options,
                            shared_intermediate_dirs):
  """Write the Eclipse CDT settings XML (include paths and macros)."""
  gyp.common.EnsureDirExists(out_name)

  eclipse_langs = ['C++ Source File', 'C Source File', 'Assembly Source File',
                   'GNU C++', 'GNU C', 'Assembly']

  with open(out_name, 'w') as out:
    out.write('<?xml version="1.0" encoding="UTF-8"?>\n')
    out.write('<cdtprojectproperties>\n')

    compiler_path = GetCompilerPath(target_list, data, options)
    include_dirs = GetAllIncludeDirectories(target_list, target_dicts,
                                            shared_intermediate_dirs,
                                            config_name, params,
                                            compiler_path)
    WriteIncludePaths(out, eclipse_langs, include_dirs)
    defines = GetAllDefines(target_list, target_dicts, data, config_name,
                            params, compiler_path)
    WriteMacros(out, eclipse_langs, defines)

    out.write('</cdtprojectproperties>\n')
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir,
                          toplevel_build, out_name):
  '''Generates a classpath file suitable for symbol navigation and code
  completion of Java code (such as in Android projects) by finding all
  .java and .jar files used as action inputs.'''
  gyp.common.EnsureDirExists(out_name)
  result = ET.Element('classpath')

  def AddElements(kind, paths):
    # Append one <classpathentry kind=... path=...> child to `result` per
    # path, normalized relative to toplevel_dir, deduplicated and sorted so
    # the generated file is deterministic.
    # First, we need to normalize the paths so they are all relative to the
    # toplevel dir.
    rel_paths = set()
    for path in paths:
      if os.path.isabs(path):
        rel_paths.add(os.path.relpath(path, toplevel_dir))
      else:
        rel_paths.add(path)

    for path in sorted(rel_paths):
      entry_element = ET.SubElement(result, 'classpathentry')
      entry_element.set('kind', kind)
      entry_element.set('path', path)

  AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir))
  AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir))
  # Include the standard JRE container and a dummy out folder
  AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER'])
  # Include a dummy out folder so that Eclipse doesn't use the default /bin
  # folder in the root of the project.
  AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')])

  ET.ElementTree(result).write(out_name)
def GetJavaJars(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all .jars used as inputs.'''
  for target_name in target_list:
    for action in target_dicts[target_name].get('actions', []):
      for input_ in action['inputs']:
        # Skip non-jar inputs and gyp variable references ('$...').
        if input_.startswith('$') or os.path.splitext(input_)[1] != '.jar':
          continue
        if os.path.isabs(input_):
          yield input_
        else:
          # Relative jar paths are relative to the gyp file's directory.
          yield os.path.join(os.path.dirname(target_name), input_)
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir):
  '''Generates a sequence of all likely java package root directories.'''
  for target_name in target_list:
    base_dir = os.path.dirname(target_name)
    for action in target_dicts[target_name].get('actions', []):
      for input_ in action['inputs']:
        # Skip non-java inputs and gyp variable references ('$...').
        if input_.startswith('$') or os.path.splitext(input_)[1] != '.java':
          continue
        dir_ = os.path.dirname(os.path.join(base_dir, input_))
        # If there is a parent 'src' or 'java' folder, navigate up to it -
        # these are canonical package root names in Chromium. This will
        # break if 'src' or 'java' exists in the package structure. This
        # could be further improved by inspecting the java file for the
        # package name if this proves to be too fragile in practice.
        parent_search = dir_
        while os.path.basename(parent_search) not in ['src', 'java']:
          parent_search, _ = os.path.split(parent_search)
          if not parent_search or parent_search == toplevel_dir:
            # Didn't find a known root, just return the original path
            yield dir_
            break
        else:
          yield parent_search
def GenerateOutput(target_list, target_dicts, data, params):
  """Generate an XML settings file that can be imported into a CDT project."""
  if params['options'].generator_output:
    raise NotImplementedError("--generator_output not implemented for eclipse")

  # Generate either the single user-requested configuration or all of the
  # configurations declared on the first target.
  requested_config = params.get('generator_flags', {}).get('config', None)
  if requested_config:
    config_names = [requested_config]
  else:
    config_names = target_dicts[target_list[0]]['configurations'].keys()
  for config_name in config_names:
    GenerateOutputForConfig(target_list, target_dicts, data, params,
                            config_name)
Just-D/chromium-1 | tools/telemetry/third_party/gsutilz/third_party/boto/boto/s3/resumable_download_handler.py | 152 | 15595 | # Copyright 2010 Google Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import errno
import httplib
import os
import re
import socket
import time
import boto
from boto import config, storage_uri_for_key
from boto.connection import AWSAuthConnection
from boto.exception import ResumableDownloadException
from boto.exception import ResumableTransferDisposition
from boto.s3.keyfile import KeyFile
from boto.gs.key import Key as GSKey
"""
Resumable download handler.
Resumable downloads will retry failed downloads, resuming at the byte count
completed by the last download attempt. If too many retries happen with no
progress (per configurable num_retries param), the download will be aborted.
The caller can optionally specify a tracker_file_name param in the
ResumableDownloadHandler constructor. If you do this, that file will
save the state needed to allow retrying later, in a separate process
(e.g., in a later run of gsutil).
Note that resumable downloads work across providers (they depend only
on support Range GETs), but this code is in the boto.s3 package
because it is the wrong abstraction level to go in the top-level boto
package.
TODO: At some point we should refactor the code to have a storage_service
package where all these provider-independent files go.
"""
class ByteTranslatingCallbackHandler(object):
    """
    Proxy that adapts progress callbacks made by boto.s3.Key.get_file(),
    shifting reported byte counts by the offset at which a download
    was resumed.
    """
    def __init__(self, proxied_cb, download_start_point):
        self.proxied_cb = proxied_cb
        self.download_start_point = download_start_point

    def call(self, total_bytes_uploaded, total_size):
        # Add the bytes already on disk from the previous attempt before
        # forwarding the progress report to the real callback.
        translated = self.download_start_point + total_bytes_uploaded
        self.proxied_cb(translated, total_size)
def get_cur_file_size(fp, position_to_eof=False):
    """
    Returns size of file, optionally leaving fp positioned at EOF.
    """
    if isinstance(fp, KeyFile) and not position_to_eof:
        # Avoid EOF seek for KeyFile case as it's very inefficient.
        return fp.getkey().size
    if position_to_eof:
        fp.seek(0, os.SEEK_END)
        return fp.tell()
    # Remember the current position, measure via an EOF seek, then restore.
    saved_pos = fp.tell()
    fp.seek(0, os.SEEK_END)
    size = fp.tell()
    fp.seek(saved_pos, os.SEEK_SET)
    return size
class ResumableDownloadHandler(object):
"""
Handler for resumable downloads.
"""
MIN_ETAG_LEN = 5
RETRYABLE_EXCEPTIONS = (httplib.HTTPException, IOError, socket.error,
socket.gaierror)
def __init__(self, tracker_file_name=None, num_retries=None):
    """
    Constructor. Instantiate once for each downloaded file.

    :type tracker_file_name: string
    :param tracker_file_name: optional file name to save tracking info
        about this download. If supplied and the current process fails
        the download, it can be retried in a new process. If called
        with an existing file containing an unexpired timestamp,
        we'll resume the transfer for this file; else we'll start a
        new resumable download.

    :type num_retries: int
    :param num_retries: the number of times we'll re-try a resumable
        download making no progress. (Count resets every time we get
        progress, so download can span many more than this number of
        retries.)
    """
    self.tracker_file_name = tracker_file_name
    self.num_retries = num_retries
    # Etag of the object being downloaded; populated from the tracker
    # file (if one was given) so a restarted process can resume.
    self.etag_value_for_current_download = None
    if tracker_file_name:
        self._load_tracker_file_etag()
    # Save download_start_point in instance state so caller can
    # find how much was transferred by this ResumableDownloadHandler
    # (across retries).
    self.download_start_point = None
def _load_tracker_file_etag(self):
    """Load the etag recorded in the tracker file, if the file exists.

    Sets self.etag_value_for_current_download from the file's first line;
    on any read problem the value is left untouched (None on first use),
    which causes the download to restart from scratch.
    """
    try:
        # 'with' guarantees the file is closed even if readline() fails,
        # replacing the manual f = None / try / finally-close dance.
        with open(self.tracker_file_name, 'r') as f:
            self.etag_value_for_current_download = f.readline().rstrip('\n')
        # We used to match an MD5-based regex to ensure that the etag was
        # read correctly. Since ETags need not be MD5s, we now do a simple
        # length sanity check instead.
        if len(self.etag_value_for_current_download) < self.MIN_ETAG_LEN:
            print('Couldn\'t read etag in tracker file (%s). Restarting '
                  'download from scratch.' % self.tracker_file_name)
    except IOError as e:
        # Ignore non-existent file (happens first time a download
        # is attempted on an object), but warn user for other errors.
        if e.errno != errno.ENOENT:
            # Will restart because
            # self.etag_value_for_current_download is None.
            print('Couldn\'t read URI tracker file (%s): %s. Restarting '
                  'download from scratch.' %
                  (self.tracker_file_name, e.strerror))
def _save_tracker_info(self, key):
    """Record the etag of *key* on self and persist it to the tracker file.

    :type key: :class:`boto.s3.key.Key` or subclass
    :param key: the key whose etag identifies the download in progress.
    :raises ResumableDownloadException: with disposition ABORT when the
        tracker file cannot be written (e.g. unwritable directory).
    """
    self.etag_value_for_current_download = key.etag.strip('"\'')
    if not self.tracker_file_name:
        return
    try:
        # 'with' guarantees the file is closed on both success and error,
        # replacing the manual try/finally-close dance.
        with open(self.tracker_file_name, 'w') as f:
            f.write('%s\n' % self.etag_value_for_current_download)
    except IOError as e:
        raise ResumableDownloadException(
            # (also fixes the missing space that rendered as "happenif")
            'Couldn\'t write tracker file (%s): %s.\nThis can happen '
            'if you\'re using an incorrectly configured download tool\n'
            '(e.g., gsutil configured to save tracker files to an '
            'unwritable directory)' %
            (self.tracker_file_name, e.strerror),
            ResumableTransferDisposition.ABORT)
def _remove_tracker_file(self):
    """Delete the tracker file, if one was configured and currently exists."""
    name = self.tracker_file_name
    if name and os.path.exists(name):
        os.unlink(name)
    def _attempt_resumable_download(self, key, fp, headers, cb, num_cb,
                                    torrent, version_id, hash_algs):
        """
        Attempts a resumable download.

        If a partial file matching the tracked ETag already exists, a Range
        request is issued to fetch only the remaining bytes; otherwise the
        file is truncated and a fresh download is started.

        Raises ResumableDownloadException if any problems occur.
        """
        cur_file_size = get_cur_file_size(fp, position_to_eof=True)

        # Resume only if we have partial data AND its tracked ETag matches
        # the current object version (otherwise the bytes could be stale).
        if (cur_file_size and
            self.etag_value_for_current_download and
            self.etag_value_for_current_download == key.etag.strip('"\'')):
            # Try to resume existing transfer.
            if cur_file_size > key.size:
                # Local file can't be a prefix of the object: abort.
                raise ResumableDownloadException(
                    '%s is larger (%d) than %s (%d).\nDeleting tracker file, so '
                    'if you re-try this download it will start from scratch' %
                    (fp.name, cur_file_size, str(storage_uri_for_key(key)),
                     key.size), ResumableTransferDisposition.ABORT)
            elif cur_file_size == key.size:
                if key.bucket.connection.debug >= 1:
                    print('Download complete.')
                return
            if key.bucket.connection.debug >= 1:
                print('Resuming download.')
            # Copy headers so the caller's dict isn't mutated by the Range.
            headers = headers.copy()
            headers['Range'] = 'bytes=%d-%d' % (cur_file_size, key.size - 1)
            # Shift progress-callback byte counts by what we already have.
            cb = ByteTranslatingCallbackHandler(cb, cur_file_size).call
            self.download_start_point = cur_file_size
        else:
            if key.bucket.connection.debug >= 1:
                print('Starting new resumable download.')
            self._save_tracker_info(key)
            self.download_start_point = 0
            # Truncate the file, in case a new resumable download is being
            # started atop an existing file.
            fp.truncate(0)

        # Disable AWSAuthConnection-level retry behavior, since that would
        # cause downloads to restart from scratch.
        if isinstance(key, GSKey):
            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                         override_num_retries=0, hash_algs=hash_algs)
        else:
            key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                         override_num_retries=0)
        fp.flush()
    def get_file(self, key, fp, headers, cb=None, num_cb=10, torrent=False,
                 version_id=None, hash_algs=None):
        """
        Retrieves a file from a Key, retrying with exponential backoff as
        long as each attempt makes forward progress.

        :type key: :class:`boto.s3.key.Key` or subclass
        :param key: The Key object from which upload is to be downloaded

        :type fp: file
        :param fp: File pointer into which data should be downloaded

        :type headers: dict
        :param headers: headers to send when retrieving the files

        :type cb: function
        :param cb: (optional) a callback function that will be called to report
            progress on the download.  The callback should accept two integer
            parameters, the first representing the number of bytes that have
            been successfully transmitted from the storage service and
            the second representing the total number of bytes that need
            to be transmitted.

        :type num_cb: int
        :param num_cb: (optional) If a callback is specified with the cb
            parameter this parameter determines the granularity of the callback
            by defining the maximum number of times the callback will be
            called during the file transfer.

        :type torrent: bool
        :param torrent: Flag for whether to get a torrent for the file

        :type version_id: string
        :param version_id: The version ID (optional)

        :type hash_algs: dictionary
        :param hash_algs: (optional) Dictionary of hash algorithms and
            corresponding hashing class that implements update() and digest().
            Defaults to {'md5': hashlib/md5.md5}.

        Raises ResumableDownloadException if a problem occurs during
            the transfer.
        """
        debug = key.bucket.connection.debug
        if not headers:
            headers = {}

        # Use num-retries from constructor if one was provided; else check
        # for a value specified in the boto config file; else default to 6.
        if self.num_retries is None:
            self.num_retries = config.getint('Boto', 'num_retries', 6)
        progress_less_iterations = 0

        while True:  # Retry as long as we're making progress.
            had_file_bytes_before_attempt = get_cur_file_size(fp)
            try:
                self._attempt_resumable_download(key, fp, headers, cb, num_cb,
                                                 torrent, version_id, hash_algs)
                # Download succeeded, so remove the tracker file (if have one).
                self._remove_tracker_file()
                # Previously, check_final_md5() was called here to validate
                # downloaded file's checksum, however, to be consistent with
                # non-resumable downloads, this call was removed. Checksum
                # validation of file contents should be done by the caller.
                if debug >= 1:
                    print('Resumable download complete.')
                return
            except self.RETRYABLE_EXCEPTIONS as e:
                if debug >= 1:
                    print('Caught exception (%s)' % e.__repr__())
                if isinstance(e, IOError) and e.errno == errno.EPIPE:
                    # Broken pipe error causes httplib to immediately
                    # close the socket (http://bugs.python.org/issue5542),
                    # so we need to close and reopen the key before resuming
                    # the download.
                    if isinstance(key, GSKey):
                        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                                     override_num_retries=0, hash_algs=hash_algs)
                    else:
                        key.get_file(fp, headers, cb, num_cb, torrent, version_id,
                                     override_num_retries=0)
            except ResumableDownloadException as e:
                # Only the generic disposition is retried; both ABORT
                # dispositions propagate to the caller.
                if (e.disposition ==
                        ResumableTransferDisposition.ABORT_CUR_PROCESS):
                    if debug >= 1:
                        print('Caught non-retryable ResumableDownloadException '
                              '(%s)' % e.message)
                    raise
                elif (e.disposition ==
                        ResumableTransferDisposition.ABORT):
                    if debug >= 1:
                        print('Caught non-retryable ResumableDownloadException '
                              '(%s); aborting and removing tracker file' %
                              e.message)
                    self._remove_tracker_file()
                    raise
                else:
                    if debug >= 1:
                        print('Caught ResumableDownloadException (%s) - will '
                              'retry' % e.message)

            # At this point we had a re-tryable failure; see if made progress.
            if get_cur_file_size(fp) > had_file_bytes_before_attempt:
                progress_less_iterations = 0
            else:
                progress_less_iterations += 1

            if progress_less_iterations > self.num_retries:
                # Don't retry any longer in the current process.
                raise ResumableDownloadException(
                    'Too many resumable download attempts failed without '
                    'progress. You might try this download again later',
                    ResumableTransferDisposition.ABORT_CUR_PROCESS)

            # Close the key, in case a previous download died partway
            # through and left data in the underlying key HTTP buffer.
            # Do this within a try/except block in case the connection is
            # closed (since key.close() attempts to do a final read, in which
            # case this read attempt would get an IncompleteRead exception,
            # which we can safely ignore.
            try:
                key.close()
            except httplib.IncompleteRead:
                pass

            # Exponential backoff: 2, 4, 8, ... seconds per progress-less try.
            sleep_time_secs = 2**progress_less_iterations
            if debug >= 1:
                print('Got retryable failure (%d progress-less in a row).\n'
                      'Sleeping %d seconds before re-trying' %
                      (progress_less_iterations, sleep_time_secs))
            time.sleep(sleep_time_secs)
| bsd-3-clause |
Icenowy/shadowsocks | shadowsocks/lru_cache.py | 983 | 4290 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import collections
import logging
import time
# this LRUCache is optimized for concurrency, not QPS
# n: concurrency, keys stored in the cache
# m: visits not timed out, proportional to QPS * timeout
# get & set is O(1), not O(n). thus we can support very large n
# TODO: if timeout or QPS is too large, then this cache is not very efficient,
# as sweep() causes long pause
try:
    # The mapping ABCs live in collections.abc since Python 3.3 and the
    # collections.* aliases were removed in Python 3.10.
    _MutableMapping = collections.abc.MutableMapping
except AttributeError:
    # Python 2
    _MutableMapping = collections.MutableMapping


class LRUCache(_MutableMapping):
    """A timeout-based LRU cache.  This class is not thread safe.

    get & set are O(1); expired entries are reclaimed in batches by
    sweep(), which is O(m) in the number of recorded visits.
    """

    def __init__(self, timeout=60, close_callback=None, *args, **kwargs):
        # Seconds of inactivity after which an entry may be swept.
        self.timeout = timeout
        # Optional callable invoked (at most once per value) on expiry.
        self.close_callback = close_callback
        self._store = {}  # key -> value
        # visit timestamp -> keys visited at that time
        self._time_to_keys = collections.defaultdict(list)
        self._keys_to_last_time = {}  # key -> last visit timestamp
        self._last_visits = collections.deque()  # all visit times, oldest first
        self._closed_values = set()  # values already given to close_callback
        self.update(dict(*args, **kwargs))  # use the free update to set keys

    def __getitem__(self, key):
        # O(1): record the visit, then delegate to the backing dict.
        t = time.time()
        self._keys_to_last_time[key] = t
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)
        return self._store[key]

    def __setitem__(self, key, value):
        # O(1)
        t = time.time()
        self._keys_to_last_time[key] = t
        self._store[key] = value
        self._time_to_keys[t].append(key)
        self._last_visits.append(t)

    def __delitem__(self, key):
        # O(1); stale entries left in the time-indexed structures are
        # cleaned up lazily by sweep().
        del self._store[key]
        del self._keys_to_last_time[key]

    def __iter__(self):
        return iter(self._store)

    def __len__(self):
        return len(self._store)

    def sweep(self):
        # O(m): evict every entry whose latest visit is older than timeout.
        now = time.time()
        c = 0
        while len(self._last_visits) > 0:
            least = self._last_visits[0]
            if now - least <= self.timeout:
                break
            if self.close_callback is not None:
                for key in self._time_to_keys[least]:
                    if key in self._store:
                        if now - self._keys_to_last_time[key] > self.timeout:
                            value = self._store[key]
                            # Fire the callback at most once per value.
                            if value not in self._closed_values:
                                self.close_callback(value)
                                self._closed_values.add(value)
            for key in self._time_to_keys[least]:
                self._last_visits.popleft()
                if key in self._store:
                    if now - self._keys_to_last_time[key] > self.timeout:
                        del self._store[key]
                        del self._keys_to_last_time[key]
                        c += 1
            del self._time_to_keys[least]
        if c:
            self._closed_values.clear()
            logging.debug('%d keys swept' % c)
def test():
    """Timing-based smoke test: exercises expiry, refresh-on-access and the
    close_callback.  Relies on real sleeps, so the exact sleep/sweep order
    below matters."""
    c = LRUCache(timeout=0.3)
    c['a'] = 1
    assert c['a'] == 1
    time.sleep(0.5)
    c.sweep()
    # 'a' was idle longer than the timeout and must be gone.
    assert 'a' not in c
    c['a'] = 2
    c['b'] = 3
    time.sleep(0.2)
    c.sweep()
    assert c['a'] == 2
    assert c['b'] == 3
    time.sleep(0.2)
    c.sweep()
    # Touch 'b' so it survives the next sweep while 'a' expires.
    c['b']
    time.sleep(0.2)
    c.sweep()
    assert 'a' not in c
    assert c['b'] == 3
    time.sleep(0.5)
    c.sweep()
    assert 'a' not in c
    assert 'b' not in c

    # close_callback must be called exactly once even though the value
    # was visited at several distinct timestamps.
    global close_cb_called
    close_cb_called = False

    def close_cb(t):
        global close_cb_called
        assert not close_cb_called
        close_cb_called = True

    c = LRUCache(timeout=0.1, close_callback=close_cb)
    c['s'] = 1
    c['s']
    time.sleep(0.1)
    c['s']
    time.sleep(0.3)
    c.sweep()

if __name__ == '__main__':
    test()
| apache-2.0 |
google/llvm-propeller | libcxx/utils/google-benchmark/tools/compare.py | 26 | 17585 | #!/usr/bin/env python
import unittest
"""
compare.py - versatile benchmark output compare tool
"""
import argparse
from argparse import ArgumentParser
import sys
import gbench
from gbench import util, report
from gbench.util import *
def check_inputs(in1, in2, flags):
    """
    Perform checking on the user provided inputs and diagnose any abnormalities
    """
    kind1, _ = classify_input_file(in1)
    kind2, _ = classify_input_file(in2)
    out_file = find_benchmark_flag('--benchmark_out=', flags)
    out_fmt = find_benchmark_flag('--benchmark_out_format=', flags)
    both_executables = kind1 == IT_Executable and kind2 == IT_Executable
    if both_executables and out_file:
        # Both runs would write to the same output file.
        print(("WARNING: '--benchmark_out=%s' will be passed to both "
               "benchmarks causing it to be overwritten") % out_file)
    if kind1 == IT_JSON and kind2 == IT_JSON and flags:
        print("WARNING: passing optional flags has no effect since both "
              "inputs are JSON")
    if out_fmt not in (None, 'json'):
        print(("ERROR: passing '--benchmark_out_format=%s' to 'compare.py`"
               " is not supported.") % out_fmt)
        sys.exit(1)
def create_parser():
    """Build the argparse parser for compare.py.

    Three sub-commands (modes) are defined: 'benchmarks', 'filters' and
    'benchmarksfiltered'; global options control aggregate display and the
    Mann-Whitney U test.
    """
    parser = ArgumentParser(
        description='versatile benchmark output compare tool')

    parser.add_argument(
        '-a',
        '--display_aggregates_only',
        dest='display_aggregates_only',
        action="store_true",
        help="If there are repetitions, by default, we display everything - the"
        " actual runs, and the aggregates computed. Sometimes, it is "
        "desirable to only view the aggregates. E.g. when there are a lot "
        "of repetitions. Do note that only the display is affected. "
        "Internally, all the actual runs are still used, e.g. for U test.")

    # Options controlling the statistical (U test) comparison.
    utest = parser.add_argument_group()
    utest.add_argument(
        '--no-utest',
        dest='utest',
        default=True,
        action="store_false",
        help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS))
    alpha_default = 0.05
    utest.add_argument(
        "--alpha",
        dest='utest_alpha',
        default=alpha_default,
        type=float,
        help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") %
        alpha_default)

    subparsers = parser.add_subparsers(
        help='This tool has multiple modes of operation:',
        dest='mode')

    # Mode 1: compare the full output of two benchmarks.
    parser_a = subparsers.add_parser(
        'benchmarks',
        help='The most simple use-case, compare all the output of these two benchmarks')
    baseline = parser_a.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    contender = parser_a.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    parser_a.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    # Mode 2: compare two filters applied to a single benchmark.
    parser_b = subparsers.add_parser(
        'filters', help='Compare filter one with the filter two of benchmark')
    baseline = parser_b.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test',
        metavar='test',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_b.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_b.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    # Mode 3: compare one filter of benchmark A with another filter of B.
    parser_c = subparsers.add_parser(
        'benchmarksfiltered',
        help='Compare filter one of first benchmark with filter two of the second benchmark')
    baseline = parser_c.add_argument_group(
        'baseline', 'The benchmark baseline')
    baseline.add_argument(
        'test_baseline',
        metavar='test_baseline',
        type=argparse.FileType('r'),
        nargs=1,
        help='A benchmark executable or JSON output file')
    baseline.add_argument(
        'filter_baseline',
        metavar='filter_baseline',
        type=str,
        nargs=1,
        help='The first filter, that will be used as baseline')
    contender = parser_c.add_argument_group(
        'contender', 'The benchmark that will be compared against the baseline')
    contender.add_argument(
        'test_contender',
        metavar='test_contender',
        type=argparse.FileType('r'),
        nargs=1,
        help='The second benchmark executable or JSON output file, that will be compared against the baseline')
    contender.add_argument(
        'filter_contender',
        metavar='filter_contender',
        type=str,
        nargs=1,
        help='The second filter, that will be compared against the baseline')
    parser_c.add_argument(
        'benchmark_options',
        metavar='benchmark_options',
        nargs=argparse.REMAINDER,
        help='Arguments to pass when running benchmark executables')

    return parser
def main():
    """Entry point: parse arguments, run or load both benchmark inputs, and
    print a difference report."""
    # Parse the command line flags
    parser = create_parser()
    args, unknown_args = parser.parse_known_args()
    if args.mode is None:
        parser.print_help()
        exit(1)
    # NOTE(review): 'assert' is stripped under 'python -O'; an explicit
    # error message would be more robust for rejecting stray arguments.
    assert not unknown_args
    benchmark_options = args.benchmark_options

    # Resolve the (baseline, contender) input pair and filters per mode.
    if args.mode == 'benchmarks':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = ''
        filter_contender = ''

        # NOTE: if test_baseline == test_contender, you are analyzing the stdev

        description = 'Comparing %s to %s' % (test_baseline, test_contender)
    elif args.mode == 'filters':
        test_baseline = args.test[0].name
        test_contender = args.test[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if filter_baseline == filter_contender, you are analyzing the
        # stdev

        description = 'Comparing %s to %s (from %s)' % (
            filter_baseline, filter_contender, args.test[0].name)
    elif args.mode == 'benchmarksfiltered':
        test_baseline = args.test_baseline[0].name
        test_contender = args.test_contender[0].name
        filter_baseline = args.filter_baseline[0]
        filter_contender = args.filter_contender[0]

        # NOTE: if test_baseline == test_contender and
        # filter_baseline == filter_contender, you are analyzing the stdev

        description = 'Comparing %s (from %s) to %s (from %s)' % (
            filter_baseline, test_baseline, filter_contender, test_contender)
    else:
        # should never happen
        print("Unrecognized mode of operation: '%s'" % args.mode)
        parser.print_help()
        exit(1)

    check_inputs(test_baseline, test_contender, benchmark_options)

    if args.display_aggregates_only:
        benchmark_options += ['--benchmark_display_aggregates_only=true']

    options_baseline = []
    options_contender = []

    if filter_baseline and filter_contender:
        options_baseline = ['--benchmark_filter=%s' % filter_baseline]
        options_contender = ['--benchmark_filter=%s' % filter_contender]

    # Run the benchmarks and report the results
    json1 = json1_orig = gbench.util.run_or_load_benchmark(
        test_baseline, benchmark_options + options_baseline)
    json2 = json2_orig = gbench.util.run_or_load_benchmark(
        test_contender, benchmark_options + options_contender)

    # Now, filter the benchmarks so that the difference report can work
    if filter_baseline and filter_contender:
        # Rename both filtered groups to a common label so entries line up.
        replacement = '[%s vs. %s]' % (filter_baseline, filter_contender)
        json1 = gbench.report.filter_benchmark(
            json1_orig, filter_baseline, replacement)
        json2 = gbench.report.filter_benchmark(
            json2_orig, filter_contender, replacement)

    # Diff and output
    output_lines = gbench.report.generate_difference_report(
        json1, json2, args.display_aggregates_only,
        args.utest, args.utest_alpha)
    print(description)
    for ln in output_lines:
        print(ln)
class TestParser(unittest.TestCase):
    """Self-tests for the command-line parser built by create_parser()."""

    def setUp(self):
        # Bug fix: 'os' was only available here because it leaked through
        # 'from gbench.util import *'; import it explicitly instead.
        import os
        self.parser = create_parser()
        testInputs = os.path.join(
            os.path.dirname(
                os.path.realpath(__file__)),
            'gbench',
            'Inputs')
        self.testInput0 = os.path.join(testInputs, 'test1_run1.json')
        self.testInput1 = os.path.join(testInputs, 'test1_run2.json')

    def test_benchmarks_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest(self):
        parsed = self.parser.parse_args(
            ['--no-utest', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.05)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_display_aggregates_only(self):
        parsed = self.parser.parse_args(
            ['-a', 'benchmarks', self.testInput0, self.testInput1])
        self.assertTrue(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_basic_without_utest_with_utest_alpha(self):
        parsed = self.parser.parse_args(
            ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertFalse(parsed.utest)
        self.assertEqual(parsed.utest_alpha, 0.314)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarks_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['d'])

    def test_benchmarks_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarks', self.testInput0, self.testInput1, '--', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarks')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_basic(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertFalse(parsed.benchmark_options)

    def test_filters_with_remainder(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['e'])

    def test_filters_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['filters', self.testInput0, 'c', 'd', '--', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'filters')
        self.assertEqual(parsed.test[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.filter_contender[0], 'd')
        self.assertEqual(parsed.benchmark_options, ['f'])

    def test_benchmarksfiltered_basic(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertFalse(parsed.benchmark_options)

    def test_benchmarksfiltered_with_remainder(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'f')

    def test_benchmarksfiltered_with_remainder_after_doubleminus(self):
        parsed = self.parser.parse_args(
            ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g'])
        self.assertFalse(parsed.display_aggregates_only)
        self.assertTrue(parsed.utest)
        self.assertEqual(parsed.mode, 'benchmarksfiltered')
        self.assertEqual(parsed.test_baseline[0].name, self.testInput0)
        self.assertEqual(parsed.filter_baseline[0], 'c')
        self.assertEqual(parsed.test_contender[0].name, self.testInput1)
        self.assertEqual(parsed.filter_contender[0], 'e')
        self.assertEqual(parsed.benchmark_options[0], 'g')
if __name__ == '__main__':
    # Run the comparison tool; swap in unittest.main() to run TestParser.
    # unittest.main()
    main()

# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# kate: tab-width: 4; replace-tabs on; indent-width 4; tab-indents: off;
# kate: indent-mode python; remove-trailing-spaces modified;
| apache-2.0 |
goliveirab/odoo | openerp/osv/__init__.py | 337 | 1080 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import osv
import fields
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
coinkeeper/2015-06-22_19-07_digitalcoin | contrib/devtools/symbol-check.py | 138 | 4151 | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function
import subprocess
import re
import sys
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default§ion=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid§ion=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid§ion=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid§ion=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
READELF_CMD = '/usr/bin/readelf'
CPPFILT_CMD = '/usr/bin/c++filt'
class CPPFilt(object):
    '''
    Demangle C++ symbol names.

    Use a pipe to the 'c++filt' command.
    '''

    def __init__(self):
        # One long-lived c++filt process; symbols are streamed through its
        # stdin/stdout pipes, one per line.
        self.proc = subprocess.Popen(
            CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)

    def __call__(self, mangled):
        # Write one mangled name, read back its demangled form.
        self.proc.stdin.write(mangled + '\n')
        demangled = self.proc.stdout.readline()
        return demangled.rstrip()

    def close(self):
        for stream in (self.proc.stdin, self.proc.stdout):
            stream.close()
        self.proc.wait()
def read_symbols(executable, imports=True):
    '''
    Parse an ELF executable and return a list of (symbol,version) tuples
    for dynamic, imported symbols.
    '''
    p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable],
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                         stdin=subprocess.PIPE)
    (stdout, stderr) = p.communicate()
    if p.returncode:
        raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
    syms = []
    for line in stdout.split('\n'):
        fields = line.split()
        # Symbol-table rows start with an index token like "12:" and carry
        # at least eight whitespace-separated fields.
        if len(fields) <= 7 or not re.match('[0-9]+:$', fields[0]):
            continue
        (sym, _, version) = fields[7].partition('@')
        is_import = fields[6] == 'UND'
        # Default-versioned symbols use '@@'; after the first partition a
        # leading '@' remains, so strip it.
        if version.startswith('@'):
            version = version[1:]
        if is_import == imports:
            syms.append((sym, version))
    return syms
def check_version(max_versions, version):
    """Return True if *version* (e.g. 'GLIBC_2.11') names a library in
    *max_versions* and its dotted version is <= that library's maximum.

    A version string without an underscore gets an implicit version of 0.
    Non-numeric version components (e.g. 'GLIBC_PRIVATE') are treated as
    unsupported instead of raising ValueError, which the original did.
    """
    if '_' in version:
        (lib, _, ver) = version.rpartition('_')
    else:
        lib = version
        ver = '0'
    try:
        ver = tuple(int(x) for x in ver.split('.'))
    except ValueError:
        # e.g. 'GLIBC_PRIVATE' -- not a numeric version, so not allowed.
        return False
    if lib not in max_versions:
        return False
    return ver <= max_versions[lib]
if __name__ == '__main__':
    # For each executable on the command line, report any imported symbol
    # whose version exceeds MAX_VERSIONS and any exported symbol at all
    # (exports are disallowed entirely).  Exit non-zero on any finding.
    cppfilt = CPPFilt()
    retval = 0
    for filename in sys.argv[1:]:
        # Check imported symbols
        for sym,version in read_symbols(filename, True):
            if version and not check_version(MAX_VERSIONS, version):
                print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
                retval = 1
        # Check exported symbols
        for sym,version in read_symbols(filename, False):
            print('%s: export of symbol %s not allowed' % (filename, cppfilt(sym)))
            retval = 1
    exit(retval)
| mit |
cortedeltimo/SickRage | lib/oauthlib/common.py | 9 | 14553 | # -*- coding: utf-8 -*-
"""
oauthlib.common
~~~~~~~~~~~~~~
This module provides data structures and utilities common
to all implementations of OAuth.
"""
from __future__ import absolute_import, unicode_literals
import collections
import datetime
import logging
import random
import re
import sys
import time
try:
from urllib import quote as _quote
from urllib import unquote as _unquote
from urllib import urlencode as _urlencode
except ImportError:
from urllib.parse import quote as _quote
from urllib.parse import unquote as _unquote
from urllib.parse import urlencode as _urlencode
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
# ASCII alphanumerics; presumably used as the alphabet for generated
# tokens/keys elsewhere in the package -- confirm against callers.
UNICODE_ASCII_CHARACTER_SET = ('abcdefghijklmnopqrstuvwxyz'
                               'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                               '0123456789')

# Printable ASCII characters permitted in OAuth client identifiers.
CLIENT_ID_CHARACTER_SET = (r' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMN'
                           'OPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}')

# Captures 'password'/'token'-style query parameters up to their value;
# NOTE(review): looks intended for redacting credentials in logs -- confirm.
SANITIZE_PATTERN = re.compile(r'([^&;]*(?:password|token)[^=]*=)[^&;]+', re.IGNORECASE)
# Matches a '%' that is NOT followed by exactly two hex digits.
INVALID_HEX_PATTERN = re.compile(r'%[^0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]')

# Characters that never need percent-encoding in URLs.
always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ'
               'abcdefghijklmnopqrstuvwxyz'
               '0123456789' '_.-')

log = logging.getLogger('oauthlib')

# Python 2/3 compatibility shims: unified names for text vs. byte strings.
PY3 = sys.version_info[0] == 3

if PY3:
    unicode_type = str
    bytes_type = bytes
else:
    unicode_type = unicode
    bytes_type = str


# 'safe' must be bytes (Python 2.6 requires bytes, other versions allow either)
def quote(s, safe=b'/'):
    """Percent-encode ``s``, always returning text (unicode)."""
    if isinstance(s, unicode_type):
        s = s.encode('utf-8')
    quoted = _quote(s, safe)
    # PY3 always returns unicode. PY2 may return either, depending on whether
    # it had to modify the string, so normalize back to unicode.
    return quoted.decode('utf-8') if isinstance(quoted, bytes_type) else quoted
def unquote(s):
    """Percent-decode ``s``, always returning text (unicode)."""
    decoded = _unquote(s)
    # PY3 always returns unicode. PY2 seems to always return what you give it,
    # which differs from quote's behavior, so normalize to unicode here too.
    return decoded.decode('utf-8') if isinstance(decoded, bytes_type) else decoded
def urlencode(params):
    """Form-encode a list of 2-tuples, always returning text (unicode)."""
    encoded = _urlencode(encode_params_utf8(params))
    if isinstance(encoded, bytes_type):  # PY2 returns bytes
        encoded = encoded.decode("utf-8")
    return encoded
def encode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are encoded to
    bytestrings using UTF-8
    """
    def _to_bytes(value):
        # Leave non-text values (bytes, ints, None, ...) untouched.
        return value.encode('utf-8') if isinstance(value, unicode_type) else value
    return [(_to_bytes(key), _to_bytes(value)) for key, value in params]
def decode_params_utf8(params):
    """Ensures that all parameters in a list of 2-element tuples are decoded to
    unicode using UTF-8.
    """
    def _to_text(value):
        # Leave non-bytes values (unicode, ints, None, ...) untouched.
        return value.decode('utf-8') if isinstance(value, bytes_type) else value
    return [(_to_text(key), _to_text(value)) for key, value in params]
# The full set of characters that may legally appear in an already
# urlencoded query string; urldecode() below rejects anything outside it.
urlencoded = set(always_safe) | set('=&;:%+~,*@!()/?')
def urldecode(query):
    """Decode a query string in x-www-form-urlencoded format into a sequence
    of two-element tuples.
    Unlike urlparse.parse_qsl(..., strict_parsing=True) urldecode will enforce
    correct formatting of the query string by validation. If validation fails
    a ValueError will be raised. urllib.parse_qsl will only raise errors if
    any of name-value pairs omits the equals sign.
    """
    # Check if query contains invalid characters
    if query and not set(query) <= urlencoded:
        error = ("Error trying to decode a non urlencoded string. "
                 "Found invalid characters: %s "
                 "in the string: '%s'. "
                 "Please ensure the request/response body is "
                 "x-www-form-urlencoded.")
        raise ValueError(error % (set(query) - urlencoded, query))
    # Check for correctly hex encoded values using a regular expression
    # All encoded values begin with % followed by two hex characters
    # correct = %00, %A0, %0A, %FF
    # invalid = %G0, %5H, %PO
    if INVALID_HEX_PATTERN.search(query):
        raise ValueError('Invalid hex encoding in query string.')
    # We encode to utf-8 prior to parsing because parse_qsl behaves
    # differently on unicode input in python 2 and 3.
    # Python 2.7
    # >>> urlparse.parse_qsl(u'%E5%95%A6%E5%95%A6')
    # u'\xe5\x95\xa6\xe5\x95\xa6'
    # Python 2.7, non unicode input gives the same
    # >>> urlparse.parse_qsl('%E5%95%A6%E5%95%A6')
    # '\xe5\x95\xa6\xe5\x95\xa6'
    # but now we can decode it to unicode
    # >>> urlparse.parse_qsl('%E5%95%A6%E5%95%A6').decode('utf-8')
    # u'\u5566\u5566'
    # Python 3.3 however
    # >>> urllib.parse.parse_qsl(u'%E5%95%A6%E5%95%A6')
    # u'\u5566\u5566'
    query = query.encode(
        'utf-8') if not PY3 and isinstance(query, unicode_type) else query
    # We want to allow queries such as "c2" whereas urlparse.parse_qsl
    # with the strict_parsing flag will not.
    params = urlparse.parse_qsl(query, keep_blank_values=True)
    # unicode all the things
    # (decode_params_utf8 converts PY2's byte results back to text).
    return decode_params_utf8(params)
def extract_params(raw):
    """Extract parameters and return them as a list of 2-tuples.

    Will successfully extract parameters from urlencoded query strings,
    dicts, or lists of 2-tuples. Empty strings/dicts/lists will return an
    empty list of parameters. Any other input will result in a return
    value of None.
    """
    if isinstance(raw, (bytes_type, unicode_type)):
        # Treat strings/bytes as an urlencoded query; invalid encodings
        # signal "not parameters" rather than raising.
        try:
            return urldecode(raw)
        except ValueError:
            return None
    if hasattr(raw, '__iter__'):
        # Probe that the iterable is dict-convertible (i.e. 2-tuples).
        try:
            dict(raw)
        except (ValueError, TypeError):
            return None
        pairs = raw.items() if isinstance(raw, dict) else raw
        return decode_params_utf8(list(pairs))
    return None
def generate_nonce():
    """Generate pseudorandom nonce that is unlikely to repeat.

    Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
    Per `section 3.2.1`_ of the MAC Access Authentication spec.

    A random 64-bit number is appended to the epoch timestamp for both
    randomness and to decrease the likelihood of collisions.

    .. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
    .. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    random_part = unicode_type(random.getrandbits(64))
    return unicode_type(random_part + generate_timestamp())
def generate_timestamp():
    """Get seconds since epoch (UTC) as a unicode string.

    Per `section 3.3`_ of the OAuth 1 RFC 5849 spec.
    Per `section 3.2.1`_ of the MAC Access Authentication spec.

    .. _`section 3.2.1`: http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-01#section-3.2.1
    .. _`section 3.3`: http://tools.ietf.org/html/rfc5849#section-3.3
    """
    seconds = int(time.time())
    return unicode_type(seconds)
def generate_token(length=30, chars=UNICODE_ASCII_CHARACTER_SET):
    """Generates a non-guessable OAuth token

    OAuth (1 and 2) does not specify the format of tokens except that they
    should be strings of random characters. Tokens should not be guessable
    and entropy when generating the random characters is important. Which is
    why SystemRandom is used instead of the default random.choice method.
    """
    rng = random.SystemRandom()
    return ''.join(rng.choice(chars) for _ in range(length))
def generate_signed_token(private_pem, request):
    """Create an RS256-signed JWT for *request*.

    Base claims are the request's scope and an expiry derived from its
    ``expires_in``; any claims supplied on the request override them.
    """
    import jwt
    issued_at = datetime.datetime.utcnow()
    claims = {
        'scope': request.scope,
        'exp': issued_at + datetime.timedelta(seconds=request.expires_in),
    }
    # request.claims wins on conflicts, matching the original merge order.
    claims.update(request.claims)
    signed = jwt.encode(claims, private_pem, 'RS256')
    return to_unicode(signed, "UTF-8")
def verify_signed_token(public_pem, token):
    """Decode *token*, verifying its RS256 signature with *public_pem*."""
    import jwt
    decoded = jwt.decode(token, public_pem, algorithms=['RS256'])
    return decoded
def generate_client_id(length=30, chars=CLIENT_ID_CHARACTER_SET):
    """Generates an OAuth client_id

    OAuth 2 specify the format of client_id in
    http://tools.ietf.org/html/rfc6749#appendix-A.
    """
    return generate_token(length=length, chars=chars)
def add_params_to_qs(query, params):
    """Extend a query string with a dict or list of two-tuples."""
    extra = list(params.items()) if isinstance(params, dict) else params
    pairs = urlparse.parse_qsl(query, keep_blank_values=True)
    pairs.extend(extra)
    return urlencode(pairs)
def add_params_to_uri(uri, params, fragment=False):
    """Add a list of two-tuples to the uri query (or fragment) component."""
    parts = urlparse.urlparse(uri)
    if fragment:
        rebuilt = parts._replace(fragment=add_params_to_qs(parts.fragment, params))
    else:
        rebuilt = parts._replace(query=add_params_to_qs(parts.query, params))
    return urlparse.urlunparse(rebuilt)
def safe_string_equals(a, b):
    """ Near-constant time string comparison.

    Used in order to avoid timing attacks on sensitive information such
    as secret keys during request verification (`rootLabs`_).

    .. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/
    """
    if len(a) != len(b):
        return False
    # Accumulate character differences with XOR so the loop always runs
    # over the full length, regardless of where the first mismatch is.
    mismatch = 0
    for char_a, char_b in zip(a, b):
        mismatch |= ord(char_a) ^ ord(char_b)
    return mismatch == 0
def to_unicode(data, encoding='UTF-8'):
    """Convert a number of different types of objects to unicode.

    Text is returned as-is, bytes are decoded, dict-like iterables are
    converted to a new dict with keys and values converted, and other
    iterables are returned as a *generator* of converted items.  Anything
    else (and iterables of non-pair items) is returned unchanged.
    """
    if isinstance(data, unicode_type):
        return data
    if isinstance(data, bytes_type):
        return unicode_type(data, encoding=encoding)
    if hasattr(data, '__iter__'):
        # Probe whether the iterable is dict-convertible (2-element items).
        try:
            dict(data)
        except TypeError:
            # Items aren't pairs at all: fall through and return the data
            # untouched (final `return data` below).
            pass
        except ValueError:
            # Assume it's a one dimensional data structure
            return (to_unicode(i, encoding) for i in data)
        else:
            # We support 2.6 which lacks dict comprehensions
            if hasattr(data, 'items'):
                data = data.items()
            return dict(((to_unicode(k, encoding), to_unicode(v, encoding)) for k, v in data))
    return data
class CaseInsensitiveDict(dict):
    """Basic case insensitive dict with strings only keys.

    A side table (``proxy``) maps the lowercased form of every key to the
    exact key stored in the underlying dict, so lookups ignore case while
    the original spelling of keys is preserved.
    """
    proxy = {}

    def __init__(self, data):
        self.proxy = dict((key.lower(), key) for key in data)
        for key in data:
            self[key] = data[key]

    def __contains__(self, k):
        return k.lower() in self.proxy

    def __getitem__(self, k):
        stored_key = self.proxy[k.lower()]
        return super(CaseInsensitiveDict, self).__getitem__(stored_key)

    def __setitem__(self, k, v):
        super(CaseInsensitiveDict, self).__setitem__(k, v)
        self.proxy[k.lower()] = k

    def __delitem__(self, k):
        lowered = k.lower()
        stored_key = self.proxy[lowered]
        super(CaseInsensitiveDict, self).__delitem__(stored_key)
        del self.proxy[lowered]

    def get(self, k, default=None):
        if k in self:
            return self[k]
        return default

    def update(self, *args, **kwargs):
        super(CaseInsensitiveDict, self).update(*args, **kwargs)
        # dict.update bypasses __setitem__, so refresh the proxy manually.
        for key in dict(*args, **kwargs):
            self.proxy[key.lower()] = key
class Request(object):
    """A malleable representation of a signable HTTP request.

    Body argument may contain any data, but parameters will only be decoded if
    they are one of:

    * urlencoded query string
    * dict
    * list of 2-tuples

    Anything else will be treated as raw body data to be passed through
    unmolested.
    """

    def __init__(self, uri, http_method='GET', body=None, headers=None,
                 encoding='utf-8'):
        # Convert to unicode using encoding if given, else assume unicode
        encode = lambda x: to_unicode(x, encoding) if encoding else x
        self.uri = encode(uri)
        self.http_method = encode(http_method)
        self.headers = CaseInsensitiveDict(encode(headers or {}))
        self.body = encode(body)
        self.decoded_body = extract_params(self.body)
        self.oauth_params = []
        self.validator_log = {}
        # Known parameter names, merged below from the query string, the
        # decoded body and the headers (later sources win on conflicts);
        # __getattr__ exposes them as attributes.
        self._params = {
            "access_token": None,
            "client": None,
            "client_id": None,
            "client_secret": None,
            "code": None,
            "extra_credentials": None,
            "grant_type": None,
            "redirect_uri": None,
            "refresh_token": None,
            "request_token": None,
            "response_type": None,
            "scope": None,
            "scopes": None,
            "state": None,
            "token": None,
            "user": None,
            "token_type_hint": None,

            # OpenID Connect
            "response_mode": None,
            "nonce": None,
            "display": None,
            "prompt": None,
            "claims": None,
            "max_age": None,
            "ui_locales": None,
            "id_token_hint": None,
            "login_hint": None,
            "acr_values": None
        }
        self._params.update(dict(urldecode(self.uri_query)))
        self._params.update(dict(self.decoded_body or []))
        self._params.update(self.headers)

    def __getattr__(self, name):
        # Only invoked when regular attribute lookup fails; surface the
        # merged request parameters as attributes.
        if name in self._params:
            return self._params[name]
        else:
            raise AttributeError(name)

    def __repr__(self):
        body = self.body
        headers = self.headers.copy()
        if body:
            # BUG FIX: the replacement must be a raw string so '\1' is a
            # backreference to the captured "name=" prefix.  Previously the
            # non-raw '\1' was the literal control character \x01, which
            # dropped the parameter name from the sanitized output.
            body = SANITIZE_PATTERN.sub(r'\1<SANITIZED>', str(body))
        if 'Authorization' in headers:
            # NOTE(review): headers.copy() yields a plain dict, so this
            # membership test is case-sensitive — confirm that is intended.
            headers['Authorization'] = '<SANITIZED>'
        return '<oauthlib.Request url="%s", http_method="%s", headers="%s", body="%s">' % (
            self.uri, self.http_method, headers, body)

    @property
    def uri_query(self):
        # Raw query-string component of the request URI.
        return urlparse.urlparse(self.uri).query

    @property
    def uri_query_params(self):
        # Query string parsed into 2-tuples; strict so malformed pairs raise.
        if not self.uri_query:
            return []
        return urlparse.parse_qsl(self.uri_query, keep_blank_values=True,
                                  strict_parsing=True)

    @property
    def duplicate_params(self):
        # Parameter names appearing more than once across body + query.
        seen_keys = collections.defaultdict(int)
        all_keys = (p[0]
                    for p in (self.decoded_body or []) + self.uri_query_params)
        for k in all_keys:
            seen_keys[k] += 1
        return [k for k, c in seen_keys.items() if c > 1]
| gpl-3.0 |
iledarn/addons-yelizariev | project_tags/project.py | 16 | 1466 | # -*- coding: utf-8 -*-
##############################################################################
#
# Project Tags
# Copyright (C) 2013 Sistemas ADHOC
# No email
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import re
from openerp import netsvc
from openerp.osv import osv, fields
class project(osv.osv):
    """Extend ``project.project`` with a many2many "Tags" field
    (legacy OpenERP ``osv`` model declaration)."""
    _name = 'project.project'
    _inherits = { }
    _inherit = [ 'project.project' ]
    _columns = {
        # Tags selectable on a project; stored in a dedicated relation
        # table linking projects to project_tags.project_tag records.
        'project_tag_ids': fields.many2many('project_tags.project_tag', 'project_tags___project_tag_ids_rel', 'project_id', 'project_tag_id', string='Tags'),
    }
    _defaults = {
    }
    _constraints = [
    ]
# Old-style OpenERP API: instantiating registers the model in the pool.
project()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| lgpl-3.0 |
openrazer/openrazer | daemon/openrazer_daemon/dbus_services/service.py | 3 | 3832 | """
Service Object for DBus
"""
# Disable some pylint stuff
# pylint: disable=no-member
import types
import dbus
import dbus.service
def copy_func(function_reference, name=None):
    """
    Copy function

    :param function_reference: Function
    :type function_reference: func

    :param name: Name of function; defaults to the original's name
    :type name: str

    :return: Copy of function
    :rtype: func
    """
    # BUG FIX: the previous implementation probed for a legacy 'code'
    # attribute and then read 'func_name', which does not exist on
    # Python 3 functions — so calling copy_func() without an explicit
    # name raised AttributeError.  This module is Python 3 only (see the
    # bare super() call in DBusService), so use the dunder attributes.
    return types.FunctionType(
        function_reference.__code__,
        function_reference.__globals__,
        name or function_reference.__name__,
        function_reference.__defaults__,
        function_reference.__closure__)
class DBusService(dbus.service.Object):
    """
    DBus Service object

    Allows for dynamic method adding
    """
    BUS_NAME = 'org.razer'

    def __init__(self, object_path):
        """
        Init the object

        :param object_path: DBus Object name
        :type object_path: str
        """
        # We could pass (bus, object_path) here, but we rather register the object manually.
        super().__init__()
        bus = dbus.SessionBus()
        # the constructor of BusName registers the bus, the returned object is not used but must be kept
        self.bus_name_obj = dbus.service.BusName(self.BUS_NAME, bus)
        self.add_to_connection(bus, object_path)

    def add_dbus_method(self, interface_name, function_name, function, in_signature=None, out_signature=None, byte_arrays=False):
        """
        Add method to DBus Object

        :param interface_name: DBus interface name
        :type interface_name: str

        :param function_name: DBus function name
        :type function_name: str

        :param function: Function reference
        :type function: object

        :param in_signature: DBus function signature
        :type in_signature: str

        :param out_signature: DBus function signature
        :type out_signature: str

        :param byte_arrays: Is byte array
        :type byte_arrays: bool
        """
        # Get class key for use in the DBus introspection table
        class_key = [key for key in self._dbus_class_table.keys() if key.endswith(self.__class__.__name__)][0]

        # Create a copy of the function so that if its used multiple times it won't affect other instances if the names changed
        function_deepcopy = copy_func(function, function_name)

        func = dbus.service.method(interface_name, in_signature=in_signature, out_signature=out_signature, byte_arrays=byte_arrays)(function_deepcopy)

        # Add method to DBus tables
        try:
            self._dbus_class_table[class_key][interface_name][function_name] = func
        except KeyError:
            self._dbus_class_table[class_key][interface_name] = {function_name: func}

        # Add method to class as DBus expects it to be there.
        setattr(self.__class__, function_name, func)

    def del_dbus_method(self, interface_name, function_name):
        """
        Remove method from DBus Object

        :param interface_name: DBus interface name
        :type interface_name: str

        :param function_name: DBus function name
        :type function_name: str
        """
        # Get class key for use in the DBus introspection table
        class_key = [key for key in self._dbus_class_table.keys() if key.endswith(self.__class__.__name__)][0]

        # Remove method from DBus tables and from the class.
        # BUG FIX: add_dbus_method() attaches the method to self.__class__,
        # so it must be deleted from the same class.  Deleting from the
        # DBusService base class raised AttributeError (silently swallowed
        # below) whenever self is a subclass instance, leaving the stale
        # method attached.
        try:
            del self._dbus_class_table[class_key][interface_name][function_name]
            delattr(self.__class__, function_name)
        except (KeyError, AttributeError):
            pass
| gpl-2.0 |
dcroc16/skunk_works | google_appengine/lib/django-1.3/tests/regressiontests/queries/tests.py | 22 | 71043 | import datetime
import pickle
import sys
from django.conf import settings
from django.core.exceptions import FieldError
from django.db import DatabaseError, connection, connections, DEFAULT_DB_ALIAS
from django.db.models import Count
from django.db.models.query import Q, ITER_CHUNK_SIZE, EmptyQuerySet
from django.test import TestCase, skipUnlessDBFeature
from django.utils import unittest
from django.utils.datastructures import SortedDict
from models import (Annotation, Article, Author, Celebrity, Child, Cover, Detail,
DumbCategory, ExtraInfo, Fan, Item, LeafA, LoopX, LoopZ, ManagedModel,
Member, NamedCategory, Note, Number, Plaything, PointerA, Ranking, Related,
Report, ReservedName, Tag, TvChef, Valid, X, Food, Eaten, Node, ObjectA, ObjectB,
ObjectC)
class BaseQuerysetTest(TestCase):
    """Shared assertion helpers for the queryset regression tests below.

    Note: this suite targets Django 1.3 on Python 2 (note the
    ``except Exception, e`` syntax), so it must stay Python 2 compatible.
    """
    def assertValueQuerysetEqual(self, qs, values):
        # Like assertQuerysetEqual, but compares the raw items produced by
        # values()/dates() querysets instead of their repr().
        return self.assertQuerysetEqual(qs, values, transform=lambda x: x)
    def assertRaisesMessage(self, exc, msg, func, *args, **kwargs):
        # Assert that func(*args, **kwargs) raises an instance of `exc`
        # whose str() is exactly `msg`; fail with "<name> not raised"
        # if no exception occurs.
        try:
            func(*args, **kwargs)
        except Exception, e:
            self.assertEqual(msg, str(e))
            self.assertTrue(isinstance(e, exc), "Expected %s, got %s" % (exc, type(e)))
        else:
            if hasattr(exc, '__name__'):
                excName = exc.__name__
            else:
                excName = str(exc)
            raise AssertionError("%s not raised" % excName)
class Queries1Tests(BaseQuerysetTest):
def setUp(self):
generic = NamedCategory.objects.create(name="Generic")
self.t1 = Tag.objects.create(name='t1', category=generic)
self.t2 = Tag.objects.create(name='t2', parent=self.t1, category=generic)
self.t3 = Tag.objects.create(name='t3', parent=self.t1)
t4 = Tag.objects.create(name='t4', parent=self.t3)
self.t5 = Tag.objects.create(name='t5', parent=self.t3)
self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
n2 = Note.objects.create(note='n2', misc='bar', id=2)
self.n3 = Note.objects.create(note='n3', misc='foo', id=3)
ann1 = Annotation.objects.create(name='a1', tag=self.t1)
ann1.notes.add(self.n1)
ann2 = Annotation.objects.create(name='a2', tag=t4)
ann2.notes.add(n2, self.n3)
# Create these out of order so that sorting by 'id' will be different to sorting
# by 'info'. Helps detect some problems later.
self.e2 = ExtraInfo.objects.create(info='e2', note=n2)
e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
self.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)
a3 = Author.objects.create(name='a3', num=3003, extra=self.e2)
self.a4 = Author.objects.create(name='a4', num=4004, extra=self.e2)
self.time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
self.time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
self.i1 = Item.objects.create(name='one', created=self.time1, modified=self.time1, creator=self.a1, note=self.n3)
self.i1.tags = [self.t1, self.t2]
self.i2 = Item.objects.create(name='two', created=self.time2, creator=self.a2, note=n2)
self.i2.tags = [self.t1, self.t3]
self.i3 = Item.objects.create(name='three', created=time3, creator=self.a2, note=self.n3)
i4 = Item.objects.create(name='four', created=time4, creator=self.a4, note=self.n3)
i4.tags = [t4]
self.r1 = Report.objects.create(name='r1', creator=self.a1)
Report.objects.create(name='r2', creator=a3)
Report.objects.create(name='r3')
# Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
# will be rank3, rank2, rank1.
self.rank1 = Ranking.objects.create(rank=2, author=self.a2)
Cover.objects.create(title="first", item=i4)
Cover.objects.create(title="second", item=self.i2)
def test_ticket1050(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=True),
['<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__id__isnull=True),
['<Item: three>']
)
def test_ticket1801(self):
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i3),
['<Author: a2>']
)
self.assertQuerysetEqual(
Author.objects.filter(item=self.i2) & Author.objects.filter(item=self.i3),
['<Author: a2>']
)
def test_ticket2306(self):
# Checking that no join types are "left outer" joins.
query = Item.objects.filter(tags=self.t2).query
self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()])
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(tags=self.t2)),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1)).filter(Q(creator__name='fred')|Q(tags=self.t2)),
['<Item: one>']
)
# Each filter call is processed "at once" against a single table, so this is
# different from the previous example as it tries to find tags that are two
# things at once (rather than two tags).
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1) & Q(tags=self.t2)),
[]
)
self.assertQuerysetEqual(
Item.objects.filter(Q(tags=self.t1), Q(creator__name='fred')|Q(tags=self.t2)),
[]
)
qs = Author.objects.filter(ranking__rank=2, ranking__id=self.rank1.id)
self.assertQuerysetEqual(list(qs), ['<Author: a2>'])
self.assertEqual(2, qs.query.count_active_tables(), 2)
qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=self.rank1.id)
self.assertEqual(qs.query.count_active_tables(), 3)
def test_ticket4464(self):
self.assertQuerysetEqual(
Item.objects.filter(tags=self.t1).filter(tags=self.t2),
['<Item: one>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name'),
['<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).filter(tags=self.t3),
['<Item: two>']
)
# Make sure .distinct() works with slicing (this was broken in Oracle).
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).order_by('name')[:3],
['<Item: one>', '<Item: one>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[self.t1, self.t2]).distinct().order_by('name')[:3],
['<Item: one>', '<Item: two>']
)
def test_tickets_2080_3592(self):
self.assertQuerysetEqual(
Author.objects.filter(item__name='one') | Author.objects.filter(name='a3'),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='one') | Q(name='a3')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(name='a3') | Q(item__name='one')),
['<Author: a1>', '<Author: a3>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(item__name='three') | Q(report__name='r3')),
['<Author: a2>']
)
def test_ticket6074(self):
# Merging two empty result sets shouldn't leave a queryset with no constraints
# (which would match everything).
self.assertQuerysetEqual(Author.objects.filter(Q(id__in=[])), [])
self.assertQuerysetEqual(
Author.objects.filter(Q(id__in=[])|Q(id__in=[])),
[]
)
def test_tickets_1878_2939(self):
self.assertEqual(Item.objects.values('creator').distinct().count(), 3)
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
xx = Item(name='four', created=self.time1, creator=self.a2, note=self.n1)
xx.save()
self.assertEqual(
Item.objects.exclude(name='two').values('creator', 'name').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count(),
4
)
self.assertEqual(
Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count(),
4
)
xx.delete()
def test_ticket7323(self):
self.assertEqual(Item.objects.values('creator', 'name').count(), 4)
def test_ticket2253(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
self.assertQuerysetEqual(
q1,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(q2, ['<Item: one>'])
self.assertQuerysetEqual(
(q1 | q2).order_by('name'),
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual((q1 & q2).order_by('name'), ['<Item: one>'])
q1 = Item.objects.filter(tags=self.t1)
q2 = Item.objects.filter(note=self.n3, tags=self.t2)
q3 = Item.objects.filter(creator=self.a4)
self.assertQuerysetEqual(
((q1 & q2) | q3).order_by('name'),
['<Item: four>', '<Item: one>']
)
# FIXME: This is difficult to fix and very much an edge case, so punt for
# now. This is related to the order_by() tests for ticket #2253, but the
# old bug exhibited itself here (q2 was pulling too many tables into the
# combined query with the new ordering, but only because we have evaluated
# q2 already).
@unittest.expectedFailure
def test_order_by_tables(self):
q1 = Item.objects.order_by('name')
q2 = Item.objects.filter(id=self.i1.id)
list(q2)
self.assertEqual(len((q1 & q2).order_by('name').query.tables), 1)
def test_tickets_4088_4306(self):
self.assertQuerysetEqual(
Report.objects.filter(creator=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__num=1001),
['<Report: r1>']
)
self.assertQuerysetEqual(Report.objects.filter(creator__id=1001), [])
self.assertQuerysetEqual(
Report.objects.filter(creator__id=self.a1.id),
['<Report: r1>']
)
self.assertQuerysetEqual(
Report.objects.filter(creator__name='a1'),
['<Report: r1>']
)
def test_ticket4510(self):
self.assertQuerysetEqual(
Author.objects.filter(report__name='r1'),
['<Author: a1>']
)
def test_ticket7378(self):
self.assertQuerysetEqual(self.a1.report_set.all(), ['<Report: r1>'])
def test_tickets_5324_6704(self):
self.assertQuerysetEqual(
Item.objects.filter(tags__name='t4'),
['<Item: four>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct(),
['<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse(),
['<Item: two>', '<Item: three>', '<Item: one>']
)
self.assertQuerysetEqual(
Author.objects.exclude(item__name='one').distinct().order_by('name'),
['<Author: a2>', '<Author: a3>', '<Author: a4>']
)
# Excluding across a m2m relation when there is more than one related
# object associated was problematic.
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').order_by('name'),
['<Item: four>', '<Item: three>']
)
self.assertQuerysetEqual(
Item.objects.exclude(tags__name='t1').exclude(tags__name='t4'),
['<Item: three>']
)
# Excluding from a relation that cannot be NULL should not use outer joins.
query = Item.objects.exclude(creator__in=[self.a1, self.a2]).query
self.assertTrue(query.LOUTER not in [x[2] for x in query.alias_map.values()])
# Similarly, when one of the joins cannot possibly, ever, involve NULL
# values (Author -> ExtraInfo, in the following), it should never be
# promoted to a left outer join. So the following query should only
# involve one "left outer" join (Author -> Item is 0-to-many).
qs = Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3))
self.assertEqual(
len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER and qs.query.alias_refcount[x[1]]]),
1
)
# The previous changes shouldn't affect nullable foreign key joins.
self.assertQuerysetEqual(
Tag.objects.filter(parent__isnull=True).order_by('name'),
['<Tag: t1>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(parent__isnull=True).order_by('name'),
['<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name'),
['<Tag: t4>', '<Tag: t5>']
)
def test_ticket2091(self):
t = Tag.objects.get(name='t4')
self.assertQuerysetEqual(
Item.objects.filter(tags__in=[t]),
['<Item: four>']
)
def test_heterogeneous_qs_combination(self):
# Combining querysets built on different models should behave in a well-defined
# fashion. We raise an error.
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() & Tag.objects.all()
)
self.assertRaisesMessage(
AssertionError,
'Cannot combine queries on two different base models.',
lambda: Author.objects.all() | Tag.objects.all()
)
def test_ticket3141(self):
self.assertEqual(Author.objects.extra(select={'foo': '1'}).count(), 4)
self.assertEqual(
Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count(),
4
)
def test_ticket2400(self):
self.assertQuerysetEqual(
Author.objects.filter(item__isnull=True),
['<Author: a3>']
)
self.assertQuerysetEqual(
Tag.objects.filter(item__isnull=True),
['<Tag: t5>']
)
def test_ticket2496(self):
self.assertQuerysetEqual(
Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1],
['<Item: four>']
)
def test_tickets_2076_7256(self):
# Ordering on related tables should be possible, even if the table is
# not otherwise involved.
self.assertQuerysetEqual(
Item.objects.order_by('note__note', 'name'),
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# Ordering on a related field should use the remote model's default
# ordering as a final step.
self.assertQuerysetEqual(
Author.objects.order_by('extra', '-name'),
['<Author: a2>', '<Author: a1>', '<Author: a4>', '<Author: a3>']
)
# Using remote model default ordering can span multiple models (in this
# case, Cover is ordered by Item's default, which uses Note's default).
self.assertQuerysetEqual(
Cover.objects.all(),
['<Cover: first>', '<Cover: second>']
)
# If the remote model does not have a default ordering, we order by its 'id'
# field.
self.assertQuerysetEqual(
Item.objects.order_by('creator', 'name'),
['<Item: one>', '<Item: three>', '<Item: two>', '<Item: four>']
)
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That
# isn't Django's problem. Garbage in, garbage out.
self.assertQuerysetEqual(
Item.objects.filter(tags__isnull=False).order_by('tags', 'id'),
['<Item: one>', '<Item: two>', '<Item: one>', '<Item: two>', '<Item: four>']
)
# If we replace the default ordering, Django adjusts the required
# tables automatically. Item normally requires a join with Note to do
# the default ordering, but that isn't needed here.
qs = Item.objects.order_by('name')
self.assertQuerysetEqual(
qs,
['<Item: four>', '<Item: one>', '<Item: three>', '<Item: two>']
)
self.assertEqual(len(qs.query.tables), 1)
def test_tickets_2874_3002(self):
qs = Item.objects.select_related().order_by('note__note', 'name')
self.assertQuerysetEqual(
qs,
['<Item: two>', '<Item: four>', '<Item: one>', '<Item: three>']
)
# This is also a good select_related() test because there are multiple
# Note entries in the SQL. The two Note items should be different.
self.assertTrue(repr(qs[0].note), '<Note: n2>')
self.assertEqual(repr(qs[0].creator.extra.note), '<Note: n1>')
def test_ticket3037(self):
self.assertQuerysetEqual(
Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four')),
['<Item: four>']
)
def test_tickets_5321_7070(self):
# Ordering columns must be included in the output columns. Note that
# this means results that might otherwise be distinct are not (if there
# are multiple values in the ordering cols), as in this example. This
# isn't a bug; it's a warning to be careful with the selection of
# ordering columns.
self.assertValueQuerysetEqual(
Note.objects.values('misc').distinct().order_by('note', '-misc'),
[{'misc': u'foo'}, {'misc': u'bar'}, {'misc': u'foo'}]
)
def test_ticket4358(self):
# If you don't pass any fields to values(), relation fields are
# returned as "foo_id" keys, not "foo". For consistency, you should be
# able to pass "foo_id" in the fields list and have it work, too. We
# actually allow both "foo" and "foo_id".
# The *_id version is returned by default.
self.assertTrue('note_id' in ExtraInfo.objects.values()[0])
# You can also pass it in explicitly.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note_id'),
[{'note_id': 1}, {'note_id': 2}]
)
# ...or use the field name.
self.assertValueQuerysetEqual(
ExtraInfo.objects.values('note'),
[{'note': 1}, {'note': 2}]
)
def test_ticket2902(self):
# Parameters can be given to extra_select, *if* you use a SortedDict.
# (First we need to know which order the keys fall in "naturally" on
# your system, so we can put things in the wrong way around from
# normal. A normal dict would thus fail.)
s = [('a', '%s'), ('b', '%s')]
params = ['one', 'two']
if {'a': 1, 'b': 2}.keys() == ['a', 'b']:
s.reverse()
params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0]
self.assertEqual(d, {'a': u'one', 'b': u'two'})
# Order by the number of tags attached to an item.
l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
self.assertEqual([o.count for o in l], [2, 2, 1, 0])
def test_ticket6154(self):
# Multiple filter statements are joined using "AND" all the time.
self.assertQuerysetEqual(
Author.objects.filter(id=self.a1.id).filter(Q(extra__note=self.n1)|Q(item__note=self.n3)),
['<Author: a1>']
)
self.assertQuerysetEqual(
Author.objects.filter(Q(extra__note=self.n1)|Q(item__note=self.n3)).filter(id=self.a1.id),
['<Author: a1>']
)
def test_ticket6981(self):
self.assertQuerysetEqual(
Tag.objects.select_related('parent').order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
def test_ticket9926(self):
self.assertQuerysetEqual(
Tag.objects.select_related("parent", "category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
self.assertQuerysetEqual(
Tag.objects.select_related('parent', "parent__category").order_by('name'),
['<Tag: t1>', '<Tag: t2>', '<Tag: t3>', '<Tag: t4>', '<Tag: t5>']
)
    def test_tickets_6180_6203(self):
        """dates() interacts correctly with count() and slicing (#6180, #6203)."""
        # Dates with limits and/or counts
        self.assertEqual(Item.objects.count(), 4)
        self.assertEqual(Item.objects.dates('created', 'month').count(), 1)
        self.assertEqual(Item.objects.dates('created', 'day').count(), 2)
        self.assertEqual(len(Item.objects.dates('created', 'day')), 2)
        self.assertEqual(Item.objects.dates('created', 'day')[0], datetime.datetime(2007, 12, 19, 0, 0))
    def test_tickets_7087_12242(self):
        """dates() composes with extra() in either order (#7087, #12242)."""
        # Dates with extra select columns
        self.assertQuerysetEqual(
            Item.objects.dates('created', 'day').extra(select={'a': 1}),
            ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
        )
        self.assertQuerysetEqual(
            Item.objects.extra(select={'a': 1}).dates('created', 'day'),
            ['datetime.datetime(2007, 12, 19, 0, 0)', 'datetime.datetime(2007, 12, 20, 0, 0)']
        )
        # extra(where=...) restrictions must also survive a dates() call.
        name="one"
        self.assertQuerysetEqual(
            Item.objects.dates('created', 'day').extra(where=['name=%s'], params=[name]),
            ['datetime.datetime(2007, 12, 19, 0, 0)']
        )
        self.assertQuerysetEqual(
            Item.objects.extra(where=['name=%s'], params=[name]).dates('created', 'day'),
            ['datetime.datetime(2007, 12, 19, 0, 0)']
        )
def test_ticket7155(self):
# Nullable dates
self.assertQuerysetEqual(
Item.objects.dates('modified', 'day'),
['datetime.datetime(2007, 12, 19, 0, 0)']
)
    def test_ticket7098(self):
        """Ordering by a raw related-table column name still works (#7098)."""
        # Make sure semi-deprecated ordering by related models syntax still
        # works.
        self.assertValueQuerysetEqual(
            Item.objects.values('note__note').order_by('queries_note.note', 'id'),
            [{'note__note': u'n2'}, {'note__note': u'n3'}, {'note__note': u'n3'}, {'note__note': u'n3'}]
        )
    def test_ticket7096(self):
        """exclude() with multiple conditions and nested negations behaves (#7096)."""
        # Make sure exclude() with multiple conditions continues to work.
        self.assertQuerysetEqual(
            Tag.objects.filter(parent=self.t1, name='t3').order_by('name'),
            ['<Tag: t3>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent=self.t1, name='t3').order_by('name'),
            ['<Tag: t1>', '<Tag: t2>', '<Tag: t4>', '<Tag: t5>']
        )
        self.assertQuerysetEqual(
            Item.objects.exclude(tags__name='t1', name='one').order_by('name').distinct(),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__in=['three', 'four']).exclude(tags__name='t1').order_by('name'),
            ['<Item: four>', '<Item: three>']
        )
        # More twisted cases, involving nested negations.
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one')),
            ['<Item: one>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(~Q(tags__name='t1', name='one'), name='two'),
            ['<Item: two>']
        )
        self.assertQuerysetEqual(
            Item.objects.exclude(~Q(tags__name='t1', name='one'), name='two'),
            ['<Item: four>', '<Item: one>', '<Item: three>']
        )
def test_tickets_7204_7506(self):
# Make sure querysets with related fields can be pickled. If this
# doesn't crash, it's a Good Thing.
pickle.dumps(Item.objects.all())
    def test_ticket7813(self):
        """select_related() querysets survive pickling with identical SQL (#7813)."""
        # We should also be able to pickle things that use select_related().
        # The only tricky thing here is to ensure that we do the related
        # selections properly after unpickling.
        qs = Item.objects.select_related()
        query = qs.query.get_compiler(qs.db).as_sql()[0]
        query2 = pickle.loads(pickle.dumps(qs.query))
        # The unpickled query must compile to exactly the same SQL string.
        self.assertEqual(
            query2.get_compiler(qs.db).as_sql()[0],
            query
        )
def test_deferred_load_qs_pickling(self):
# Check pickling of deferred-loading querysets
qs = Item.objects.defer('name', 'creator')
q2 = pickle.loads(pickle.dumps(qs))
self.assertEqual(list(qs), list(q2))
q3 = pickle.loads(pickle.dumps(qs, pickle.HIGHEST_PROTOCOL))
self.assertEqual(list(qs), list(q3))
    def test_ticket7277(self):
        """Disjunctions spanning several levels of a self-referential FK work (#7277)."""
        self.assertQuerysetEqual(
            self.n1.annotation_set.filter(Q(tag=self.t5) | Q(tag__children=self.t5) | Q(tag__children__children=self.t5)),
            ['<Annotation: a1>']
        )
    def test_tickets_7448_7707(self):
        """Non-string lookup values are converted before use (#7448, #7707)."""
        # Complex objects should be converted to strings before being used in
        # lookups.
        self.assertQuerysetEqual(
            Item.objects.filter(created__in=[self.time1, self.time2]),
            ['<Item: one>', '<Item: two>']
        )
    def test_ticket7235(self):
        """Every queryset operation on an EmptyQuerySet is a no-op, not an error (#7235)."""
        # An EmptyQuerySet should not raise exceptions if it is filtered.
        q = EmptyQuerySet()
        self.assertQuerysetEqual(q.all(), [])
        self.assertQuerysetEqual(q.filter(x=10), [])
        self.assertQuerysetEqual(q.exclude(y=3), [])
        self.assertQuerysetEqual(q.complex_filter({'pk': 1}), [])
        self.assertQuerysetEqual(q.select_related('spam', 'eggs'), [])
        self.assertQuerysetEqual(q.annotate(Count('eggs')), [])
        self.assertQuerysetEqual(q.order_by('-pub_date', 'headline'), [])
        self.assertQuerysetEqual(q.distinct(), [])
        self.assertQuerysetEqual(
            q.extra(select={'is_recent': "pub_date > '2006-01-01'"}),
            []
        )
        # Slicing restrictions still apply, even when empty.
        q.query.low_mark = 1
        self.assertRaisesMessage(
            AssertionError,
            'Cannot change a query once a slice has been taken',
            q.extra, select={'is_recent': "pub_date > '2006-01-01'"}
        )
        self.assertQuerysetEqual(q.reverse(), [])
        self.assertQuerysetEqual(q.defer('spam', 'eggs'), [])
        self.assertQuerysetEqual(q.only('spam', 'eggs'), [])
    def test_ticket7791(self):
        """order_by + distinct across an FK works, and DateQuerySets pickle (#7791)."""
        # There were "issues" when ordering and distinct-ing on fields related
        # via ForeignKeys.
        self.assertEqual(
            len(Note.objects.order_by('extrainfo__info').distinct()),
            3
        )
        # Pickling of DateQuerySets used to fail
        qs = Item.objects.dates('created', 'month')
        _ = pickle.loads(pickle.dumps(qs))
    def test_ticket9997(self):
        """Single-field values querysets work as __in subqueries; multi-field ones raise (#9997)."""
        # If a ValuesList or Values queryset is passed as an inner query, we
        # make sure it's only requesting a single value and use that as the
        # thing to select.
        self.assertQuerysetEqual(
            Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name')),
            ['<Tag: t2>', '<Tag: t3>']
        )
        # Multi-valued values() and values_list() querysets should raise errors.
        self.assertRaisesMessage(
            TypeError,
            'Cannot use a multi-field ValuesQuerySet as a filter value.',
            lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
        )
        self.assertRaisesMessage(
            TypeError,
            'Cannot use a multi-field ValuesListQuerySet as a filter value.',
            lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
        )
    def test_ticket9985(self):
        """values_list(...).values(...) combinations are legal (#9985)."""
        # qs.values_list(...).values(...) combinations should work.
        self.assertValueQuerysetEqual(
            Note.objects.values_list("note", flat=True).values("id").order_by("id"),
            [{'id': 1}, {'id': 2}, {'id': 3}]
        )
        # ...including when used as an __in subquery.
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1").values_list('note').values('id')),
            ['<Annotation: a1>']
        )
def test_ticket10205(self):
# When bailing out early because of an empty "__in" filter, we need
# to set things up correctly internally so that subqueries can continue properly.
self.assertEqual(Tag.objects.filter(name__in=()).update(name="foo"), 0)
    def test_ticket10432(self):
        """Generators (empty or not) are valid values for an __in filter (#10432)."""
        # Testing an empty "__in" filter with a generator as the value.
        def f():
            return iter([])
        n_obj = Note.objects.all()[0]
        def g():
            for i in [n_obj.pk]:
                yield i
        self.assertQuerysetEqual(Note.objects.filter(pk__in=f()), [])
        self.assertEqual(list(Note.objects.filter(pk__in=g())), [n_obj])
    def test_ticket10742(self):
        """Querysets used in __in clauses are inlined as subqueries, never executed (#10742)."""
        # Queries used in an __in clause don't execute subqueries
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.filter(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a1>', '<Author: a2>'])
        # The subquery result cache should not be populated
        self.assertTrue(subq._result_cache is None)
        subq = Author.objects.filter(num__lt=3000)
        qs = Author.objects.exclude(pk__in=subq)
        self.assertQuerysetEqual(qs, ['<Author: a3>', '<Author: a4>'])
        # The subquery result cache should not be populated
        self.assertTrue(subq._result_cache is None)
        subq = Author.objects.filter(num__lt=3000)
        self.assertQuerysetEqual(
            Author.objects.filter(Q(pk__in=subq) & Q(name='a1')),
            ['<Author: a1>']
        )
        # The subquery result cache should not be populated
        self.assertTrue(subq._result_cache is None)
    def test_ticket7076(self):
        """exclude() keeps rows whose compared column is NULL (#7076)."""
        # Excluding shouldn't eliminate NULL entries.
        self.assertQuerysetEqual(
            Item.objects.exclude(modified=self.time1).order_by('name'),
            ['<Item: four>', '<Item: three>', '<Item: two>']
        )
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__name=self.t1.name),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )
    def test_ticket7181(self):
        """Ordering across nullable relations and merging with none() work (#7181)."""
        # Ordering by related tables should accommodate nullable fields (this
        # test is a little tricky, since NULL ordering is database dependent.
        # Instead, we just count the number of results).
        self.assertEqual(len(Tag.objects.order_by('parent__name')), 5)
        # Empty querysets can be merged with others.
        self.assertQuerysetEqual(
            Note.objects.none() | Note.objects.all(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(
            Note.objects.all() | Note.objects.none(),
            ['<Note: n1>', '<Note: n2>', '<Note: n3>']
        )
        self.assertQuerysetEqual(Note.objects.none() & Note.objects.all(), [])
        self.assertQuerysetEqual(Note.objects.all() & Note.objects.none(), [])
    def test_ticket9411(self):
        """bump_prefix() (internal Query method) leaves the query executable (#9411)."""
        # Make sure bump_prefix() (an internal Query method) doesn't (re-)break. It's
        # sufficient that this query runs without error.
        qs = Tag.objects.values_list('id', flat=True).order_by('id')
        qs.query.bump_prefix()
        first = qs[0]
        # Fixture ids are consecutive, so the flat id list is a simple range.
        self.assertEqual(list(qs), range(first, first+5))
    def test_ticket8439(self):
        """Complex AND/OR combinations over nullable relations join correctly (#8439)."""
        # Complex combinations of conjunctions, disjunctions and nullable
        # relations.
        self.assertQuerysetEqual(
            Author.objects.filter(Q(item__note__extrainfo=self.e2)|Q(report=self.r1, name='xyz')),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Author.objects.filter(Q(report=self.r1, name='xyz')|Q(item__note__extrainfo=self.e2)),
            ['<Author: a2>']
        )
        self.assertQuerysetEqual(
            Annotation.objects.filter(Q(tag__parent=self.t1)|Q(notes__note='n1', name='a1')),
            ['<Annotation: a1>']
        )
        xx = ExtraInfo.objects.create(info='xx', note=self.n3)
        self.assertQuerysetEqual(
            Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)),
            ['<Note: n1>', '<Note: n3>']
        )
        xx.delete()
        q = Note.objects.filter(Q(extrainfo__author=self.a1)|Q(extrainfo=xx)).query
        # Exactly one LEFT OUTER JOIN alias should remain referenced in the
        # compiled query's alias map.
        self.assertEqual(
            len([x[2] for x in q.alias_map.values() if x[2] == q.LOUTER and q.alias_refcount[x[1]]]),
            1
        )
class Queries2Tests(TestCase):
    """Regression tests around numeric filtering and partially-read result sets."""

    def setUp(self):
        # Three Number fixtures used by every test in this class.
        Number.objects.create(num=4)
        Number.objects.create(num=8)
        Number.objects.create(num=12)

    def test_ticket4289(self):
        """Contradictory or narrow range filters return the right rows (#4289)."""
        # A slight variation on the restricting the filtering choices by the
        # lookup constraints.
        self.assertQuerysetEqual(Number.objects.filter(num__lt=4), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=8, num__lt=12), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=8, num__lt=13),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4)),
            []
        )
        self.assertQuerysetEqual(
            Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4)),
            ['<Number: 8>']
        )

    def test_ticket12239(self):
        """Float lookup values against an integer field are not rounded (#12239)."""
        # Float was being rounded to integer on gte queries on integer field. Tests
        # show that gt, lt, gte, and lte work as desired. Note that the fix changes
        # get_prep_lookup for gte and lt queries only.
        self.assertQuerysetEqual(
            Number.objects.filter(num__gt=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.0), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gt=12.1), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12),
            ['<Number: 4>', '<Number: 8>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.0),
            ['<Number: 4>', '<Number: 8>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lt=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=11.9),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__gte=12.0),
            ['<Number: 12>']
        )
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.1), [])
        self.assertQuerysetEqual(Number.objects.filter(num__gte=12.9), [])
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=11.9),
            ['<Number: 4>', '<Number: 8>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.0),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.1),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )
        self.assertQuerysetEqual(
            Number.objects.filter(num__lte=12.9),
            ['<Number: 4>', '<Number: 8>', '<Number: 12>']
        )

    def test_ticket7411(self):
        """Saving must work while another cursor's result set is partially read (#7411)."""
        # Saving to db must work even with partially read result set in another
        # cursor.
        for num in range(2 * ITER_CHUNK_SIZE + 1):
            _ = Number.objects.create(num=num)
        for i, obj in enumerate(Number.objects.all()):
            obj.save()
            if i > 10: break

    def test_ticket7759(self):
        """count() must work with a partially read result set (#7759)."""
        # Count should work with a partially read result set.
        count = Number.objects.count()
        qs = Number.objects.all()
        def run():
            # Issue count() mid-iteration of the same queryset.
            for obj in qs:
                return qs.count() == count
        self.assertTrue(run())
class Queries3Tests(BaseQuerysetTest):
    """Regression tests for infinite loops and dates() argument validation."""

    def test_ticket7107(self):
        """Evaluating Valid.objects.all() must terminate (#7107)."""
        # Regression: this used to recurse forever; an empty result is enough.
        queryset = Valid.objects.all()
        self.assertQuerysetEqual(queryset, [])

    def test_ticket8683(self):
        """dates() rejects non-DateField fields with a clear error (#8683)."""
        self.assertRaisesMessage(
            AssertionError,
            "'name' isn't a DateField.",
            Item.objects.dates, 'name', 'month'
        )
class Queries4Tests(BaseQuerysetTest):
    """Regression tests around OR-ed filters, outer joins and select_related."""

    def setUp(self):
        # Fixture graph: two authors with extra info, three reports (one with
        # no creator, exercising nullable FKs), and two items.
        generic = NamedCategory.objects.create(name="Generic")
        self.t1 = Tag.objects.create(name='t1', category=generic)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        self.a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        self.a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        self.r1 = Report.objects.create(name='r1', creator=self.a1)
        self.r2 = Report.objects.create(name='r2', creator=self.a3)
        self.r3 = Report.objects.create(name='r3')
        Item.objects.create(name='i1', created=datetime.datetime.now(), note=n1, creator=self.a1)
        Item.objects.create(name='i2', created=datetime.datetime.now(), note=n1, creator=self.a3)

    def test_ticket14876(self):
        """Q(a)|Q(b) inside one filter equals OR-ing two filtered querysets (#14876)."""
        q1 = Report.objects.filter(Q(creator__isnull=True) | Q(creator__extra__info='e1'))
        q2 = Report.objects.filter(Q(creator__isnull=True)) | Report.objects.filter(Q(creator__extra__info='e1'))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"])
        # Both results and generated SQL must be identical.
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Report.objects.filter(Q(creator__extra__info='e1') | Q(creator__isnull=True))
        q2 = Report.objects.filter(Q(creator__extra__info='e1')) | Report.objects.filter(Q(creator__isnull=True))
        self.assertQuerysetEqual(q1, ["<Report: r1>", "<Report: r3>"])
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Item.objects.filter(Q(creator=self.a1) | Q(creator__report__name='r1')).order_by()
        q2 = Item.objects.filter(Q(creator=self.a1)).order_by() | Item.objects.filter(Q(creator__report__name='r1')).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))
        q1 = Item.objects.filter(Q(creator__report__name='e1') | Q(creator=self.a1)).order_by()
        q2 = Item.objects.filter(Q(creator__report__name='e1')).order_by() | Item.objects.filter(Q(creator=self.a1)).order_by()
        self.assertQuerysetEqual(q1, ["<Item: i1>"])
        self.assertEqual(str(q1.query), str(q2.query))

    def test_ticket7095(self):
        """Self-referencing updates, and outer joins for values()/select_related (#7095)."""
        # Updates that are filtered on the model being updated are somewhat
        # tricky in MySQL. This exercises that case.
        ManagedModel.objects.create(data='mm1', tag=self.t1, public=True)
        self.assertEqual(ManagedModel.objects.update(data='mm'), 1)
        # A values() or values_list() query across joined models must use outer
        # joins appropriately.
        # Note: In Oracle, we expect a null CharField to return u'' instead of
        # None.
        if connection.features.interprets_empty_strings_as_nulls:
            expected_null_charfield_repr = u''
        else:
            expected_null_charfield_repr = None
        self.assertValueQuerysetEqual(
            Report.objects.values_list("creator__extra__info", flat=True).order_by("name"),
            [u'e1', u'e2', expected_null_charfield_repr],
        )
        # Similarly for select_related(), joins beyond an initial nullable join
        # must use outer joins so that all results are included.
        self.assertQuerysetEqual(
            Report.objects.select_related("creator", "creator__extra").order_by("name"),
            ['<Report: r1>', '<Report: r2>', '<Report: r3>']
        )
        # When there are multiple paths to a table from another table, we have
        # to be careful not to accidentally reuse an inappropriate join when
        # using select_related(). We used to return the parent's Detail record
        # here by mistake.
        d1 = Detail.objects.create(data="d1")
        d2 = Detail.objects.create(data="d2")
        m1 = Member.objects.create(name="m1", details=d1)
        m2 = Member.objects.create(name="m2", details=d2)
        Child.objects.create(person=m2, parent=m1)
        obj = m1.children.select_related("person__details")[0]
        self.assertEqual(obj.person.details.data, u'd2')

    def test_order_by_resetting(self):
        """order_by() with no args clears ordering; later order_by() re-adds it."""
        # Calling order_by() with no parameters removes any existing ordering on the
        # model. But it should still be possible to add new ordering after that.
        qs = Author.objects.order_by().order_by('name')
        self.assertTrue('ORDER BY' in qs.query.get_compiler(qs.db).as_sql()[0])

    def test_ticket10181(self):
        """A probably-empty inner query must not raise EmptyResultSet (#10181)."""
        # Avoid raising an EmptyResultSet if an inner query is probably
        # empty (and hence, not executed).
        self.assertQuerysetEqual(
            Tag.objects.filter(id__in=Tag.objects.filter(id__in=[])),
            []
        )
class Queries5Tests(TestCase):
    """Regression tests around Meta.ordering across models and extra() columns."""

    def setUp(self):
        # Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
        # will be rank3, rank2, rank1.
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        n2 = Note.objects.create(note='n2', misc='bar', id=2)
        e1 = ExtraInfo.objects.create(info='e1', note=n1)
        e2 = ExtraInfo.objects.create(info='e2', note=n2)
        a1 = Author.objects.create(name='a1', num=1001, extra=e1)
        a2 = Author.objects.create(name='a2', num=2002, extra=e1)
        a3 = Author.objects.create(name='a3', num=3003, extra=e2)
        self.rank1 = Ranking.objects.create(rank=2, author=a2)
        Ranking.objects.create(rank=1, author=a3)
        Ranking.objects.create(rank=3, author=a1)

    def test_ordering(self):
        """Cross-model Meta ordering and extra()-column ordering both work."""
        # Cross model ordering is possible in Meta, too.
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        self.assertQuerysetEqual(
            Ranking.objects.all().order_by('rank'),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )
        # Ordering of extra() pieces is possible, too and you can mix extra
        # fields and model fields in the ordering.
        self.assertQuerysetEqual(
            Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank']),
            ['<Ranking: 1: a3>', '<Ranking: 2: a2>', '<Ranking: 3: a1>']
        )
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        self.assertEqual(
            [o.good for o in qs.extra(order_by=('-good',))],
            [True, False, False]
        )
        self.assertQuerysetEqual(
            qs.extra(order_by=('-good', 'id')),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )
        # Despite having some extra aliases in the query, we can still omit
        # them in a values() query.
        # NOTE: d.items()[1] relies on Python 2 dicts returning lists; kept
        # verbatim for this legacy suite.
        dicts = qs.values('id', 'rank').order_by('id')
        self.assertEqual(
            [d.items()[1] for d in dicts],
            [('rank', 2), ('rank', 1), ('rank', 3)]
        )

    def test_ticket7256(self):
        """An empty values() call includes aliases defined via extra() (#7256)."""
        # An empty values() call includes all aliases, including those from an
        # extra()
        qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
        dicts = qs.values().order_by('id')
        for d in dicts: del d['id']; del d['author_id']
        self.assertEqual(
            [sorted(d.items()) for d in dicts],
            [[('good', 0), ('rank', 2)], [('good', 0), ('rank', 1)], [('good', 1), ('rank', 3)]]
        )

    def test_ticket7045(self):
        """Compiling SQL twice with extra tables must not crash (#7045)."""
        # Extra tables used to crash SQL construction on the second use.
        qs = Ranking.objects.extra(tables=['django_site'])
        qs.query.get_compiler(qs.db).as_sql()
        # test passes if this doesn't raise an exception.
        qs.query.get_compiler(qs.db).as_sql()

    def test_ticket9848(self):
        """Updates filtered only on related tables touch the right rows (#9848)."""
        # Make sure that updates which only filter on sub-tables don't
        # inadvertently update the wrong records (bug #9848).
        # Make sure that the IDs from different tables don't happen to match.
        self.assertQuerysetEqual(
            Ranking.objects.filter(author__name='a1'),
            ['<Ranking: 3: a1>']
        )
        self.assertEqual(
            Ranking.objects.filter(author__name='a1').update(rank='4'),
            1
        )
        r = Ranking.objects.filter(author__name='a1')[0]
        self.assertNotEqual(r.id, r.author.id)
        self.assertEqual(r.rank, 4)
        # Restore the fixture state for any later assertions.
        r.rank = 3
        r.save()
        self.assertQuerysetEqual(
            Ranking.objects.all(),
            ['<Ranking: 3: a1>', '<Ranking: 2: a2>', '<Ranking: 1: a3>']
        )

    def test_ticket5261(self):
        """Excluding an empty Q() returns everything (#5261)."""
        self.assertQuerysetEqual(
            Note.objects.exclude(Q()),
            ['<Note: n1>', '<Note: n2>']
        )
class SelectRelatedTests(TestCase):
    """Regression tests for select_related() over circular relations."""

    def test_tickets_3045_3288(self):
        """select_related() on circular FKs must not loop forever (#3045, #3288)."""
        # A default depth bound now prevents the historical infinite loop; an
        # empty result set from each query is the whole assertion.
        plain = X.objects.all()
        with_related = X.objects.select_related()
        self.assertQuerysetEqual(plain, [])
        self.assertQuerysetEqual(with_related, [])
class SubclassFKTests(TestCase):
    """Regression tests for deleting model subclasses with FK cycles."""

    def test_ticket7778(self):
        """Deleting a subclass whose nullable FK target links back works (#7778)."""
        baseline = Celebrity.objects.count()
        chef = TvChef.objects.create(name="Huey")
        self.assertEqual(Celebrity.objects.count(), baseline + 1)
        # Give the chef some fans so the cascade has something to traverse.
        for _ in range(2):
            Fan.objects.create(fan_of=chef)
        chef.delete()
        # The parent (Celebrity) row must have been deleted as well.
        self.assertEqual(Celebrity.objects.count(), baseline)
class CustomPkTests(TestCase):
    """Regression tests for ordering by a custom primary key."""

    def test_ticket7371(self):
        """order_by() on a custom pk column must not raise (#7371)."""
        ordered = Related.objects.order_by('custom')
        self.assertQuerysetEqual(ordered, [])
class NullableRelOrderingTests(TestCase):
    """Regression tests for default ordering through nullable relations."""

    def test_ticket10028(self):
        """Meta ordering through a nullable relation uses outer joins (#10028)."""
        # The created object has no related row, so an inner join would
        # silently drop it from the results.
        Plaything.objects.create(name="p1")
        self.assertQuerysetEqual(
            Plaything.objects.all(),
            ['<Plaything: p1>']
        )
class DisjunctiveFilterTests(TestCase):
    """Regression tests for OR-ed filters over (possibly missing) relations."""

    def setUp(self):
        self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ExtraInfo.objects.create(info='e1', note=self.n1)

    def test_ticket7872(self):
        """A disjunction spanning a relation with no related rows still matches (#7872)."""
        # Another variation on the disjunctive filtering theme.
        # For the purposes of this regression test, it's important that there is no
        # Join object related to the LeafA we create.
        LeafA.objects.create(data='first')
        self.assertQuerysetEqual(LeafA.objects.all(), ['<LeafA: first>'])
        self.assertQuerysetEqual(
            LeafA.objects.filter(Q(data='first')|Q(join__b__data='second')),
            ['<LeafA: first>']
        )

    def test_ticket8283(self):
        """Filtering after OR-combining two querysets works either way round (#8283)."""
        # Checking that applying filters after a disjunction works correctly.
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(note=self.n1)|ExtraInfo.objects.filter(info='e2')).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )
        self.assertQuerysetEqual(
            (ExtraInfo.objects.filter(info='e2')|ExtraInfo.objects.filter(note=self.n1)).filter(note=self.n1),
            ['<ExtraInfo: e1>']
        )
class Queries6Tests(TestCase):
    """Regression tests for result-cache slicing, parallel iterators and
    exclude() across multi-valued relations."""

    def setUp(self):
        # Tag tree: t1 -> (t2, t3), t3 -> (t4, t5); t1/t2 carry a category.
        generic = NamedCategory.objects.create(name="Generic")
        t1 = Tag.objects.create(name='t1', category=generic)
        t2 = Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        t4 = Tag.objects.create(name='t4', parent=t3)
        t5 = Tag.objects.create(name='t5', parent=t3)
        n1 = Note.objects.create(note='n1', misc='foo', id=1)
        ann1 = Annotation.objects.create(name='a1', tag=t1)
        ann1.notes.add(n1)
        # ann2 is intentionally unreferenced; it only needs to exist for the
        # exclude() tests below.
        ann2 = Annotation.objects.create(name='a2', tag=t4)

    # This next test used to cause really weird PostgreSQL behaviour, but it was
    # only apparent much later when the full test suite ran.
    #@unittest.expectedFailure
    def test_slicing_and_cache_interaction(self):
        """Slicing beyond the filled portion of the result cache extends it."""
        # We can do slicing beyond what is currently in the result cache,
        # too.
        # We need to mess with the implementation internals a bit here to decrease the
        # cache fill size so that we don't read all the results at once.
        # NOTE(review): this monkeypatch of query.ITER_CHUNK_SIZE is never
        # restored, so it leaks into later tests — presumably tolerated here.
        from django.db.models import query
        query.ITER_CHUNK_SIZE = 2
        qs = Tag.objects.all()
        # Fill the cache with the first chunk.
        self.assertTrue(bool(qs))
        self.assertEqual(len(qs._result_cache), 2)
        # Query beyond the end of the cache and check that it is filled out as required.
        self.assertEqual(repr(qs[4]), '<Tag: t5>')
        self.assertEqual(len(qs._result_cache), 5)
        # But querying beyond the end of the result set will fail.
        self.assertRaises(IndexError, lambda: qs[100])

    def test_parallel_iterators(self):
        """Two independent iterators over one queryset advance independently."""
        # Test that parallel iterators work.
        qs = Tag.objects.all()
        i1, i2 = iter(qs), iter(qs)
        self.assertEqual(repr(i1.next()), '<Tag: t1>')
        self.assertEqual(repr(i1.next()), '<Tag: t2>')
        self.assertEqual(repr(i2.next()), '<Tag: t1>')
        self.assertEqual(repr(i2.next()), '<Tag: t2>')
        self.assertEqual(repr(i2.next()), '<Tag: t3>')
        self.assertEqual(repr(i1.next()), '<Tag: t3>')
        # Repeated truth-testing of an empty queryset stays False (and cached).
        qs = X.objects.all()
        self.assertEqual(bool(qs), False)
        self.assertEqual(bool(qs), False)

    def test_nested_queries_sql(self):
        """A nested queryset compiles to a subquery, not an inlined result."""
        # Nested queries should not evaluate the inner query as part of constructing the
        # SQL (so we should see a nested query here, indicated by two "SELECT" calls).
        qs = Annotation.objects.filter(notes__in=Note.objects.filter(note="xyzzy"))
        self.assertEqual(
            qs.query.get_compiler(qs.db).as_sql()[0].count('SELECT'),
            2
        )

    def test_tickets_8921_9188(self):
        """exclude() across multi-valued relations generates correct SQL (#8921, #9188)."""
        # Incorrect SQL was being generated for certain types of exclude()
        # queries that crossed multi-valued relations (#8921, #9188 and some
        # pre-emptively discovered cases).
        self.assertQuerysetEqual(
            PointerA.objects.filter(connection__pointerb__id=1),
            []
        )
        self.assertQuerysetEqual(
            PointerA.objects.exclude(connection__pointerb__id=1),
            []
        )
        # This next makes exactly *zero* sense, but it works. It's needed
        # because MySQL fails to give the right results the first time this
        # query is executed. If you run the same query a second time, it
        # works fine. It's a hack, but it works...
        list(Tag.objects.exclude(children=None))
        self.assertQuerysetEqual(
            Tag.objects.exclude(children=None),
            ['<Tag: t1>', '<Tag: t3>']
        )
        # This example is tricky because the parent could be NULL, so only checking
        # parents with annotations omits some results (tag t1, in this case).
        self.assertQuerysetEqual(
            Tag.objects.exclude(parent__annotation__name="a1"),
            ['<Tag: t1>', '<Tag: t4>', '<Tag: t5>']
        )
        # The annotation->tag link is single values and tag->children links is
        # multi-valued. So we have to split the exclude filter in the middle
        # and then optimise the inner query without losing results.
        self.assertQuerysetEqual(
            Annotation.objects.exclude(tag__children__name="t2"),
            ['<Annotation: a2>']
        )
        # Nested queries are possible (although should be used with care, since
        # they have performance problems on backends like MySQL.
        self.assertQuerysetEqual(
            Annotation.objects.filter(notes__in=Note.objects.filter(note="n1")),
            ['<Annotation: a1>']
        )

    def test_ticket3739(self):
        """all() returns a copy, not the same queryset object (#3739)."""
        # The all() method on querysets returns a copy of the queryset.
        q1 = Tag.objects.order_by('name')
        self.assertIsNot(q1, q1.all())
class GeneratorExpressionTests(TestCase):
    """Regression tests for generator expressions as lookup values."""

    def test_ticket10432(self):
        """An empty generator expression is a legal "__in" rvalue (#10432)."""
        empty_gen = (value for value in ())
        self.assertQuerysetEqual(Note.objects.filter(pk__in=empty_gen), [])
class ComparisonTests(TestCase):
    """Regression tests for case-insensitive lookups with SQL wildcard chars."""

    def setUp(self):
        self.n1 = Note.objects.create(note='n1', misc='foo', id=1)
        e1 = ExtraInfo.objects.create(info='e1', note=self.n1)
        self.a2 = Author.objects.create(name='a2', num=2002, extra=e1)

    def test_ticket8597(self):
        """iexact/istartswith/iendswith escape LIKE wildcards "_" and "%" (#8597)."""
        # Regression tests for case-insensitive comparisons
        _ = Item.objects.create(name="a_b", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        _ = Item.objects.create(name="x%y", created=datetime.datetime.now(), creator=self.a2, note=self.n1)
        self.assertQuerysetEqual(
            Item.objects.filter(name__iexact="A_b"),
            ['<Item: a_b>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__iexact="x%Y"),
            ['<Item: x%y>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__istartswith="A_b"),
            ['<Item: a_b>']
        )
        self.assertQuerysetEqual(
            Item.objects.filter(name__iendswith="A_b"),
            ['<Item: a_b>']
        )
class ExistsSql(TestCase):
    """Checks the SQL generated by QuerySet.exists() via the query log."""

    def setUp(self):
        # DEBUG must be on so connection.queries records the executed SQL.
        settings.DEBUG = True

    def test_exists(self):
        """exists() must not select any real columns."""
        self.assertFalse(Tag.objects.exists())
        # Ok - so the exist query worked - but did it include too many columns?
        self.assertTrue("id" not in connection.queries[-1]['sql'] and "name" not in connection.queries[-1]['sql'])

    def tearDown(self):
        # Restore the global flag flipped in setUp.
        settings.DEBUG = False
class QuerysetOrderedTests(unittest.TestCase):
    """
    Tests for the Queryset.ordered attribute.
    """

    def test_no_default_or_explicit_ordering(self):
        """A model without Meta.ordering yields an unordered queryset."""
        self.assertEqual(Annotation.objects.all().ordered, False)

    def test_cleared_default_ordering(self):
        """order_by() with no args clears the Meta default ordering."""
        self.assertEqual(Tag.objects.all().ordered, True)
        self.assertEqual(Tag.objects.all().order_by().ordered, False)

    def test_explicit_ordering(self):
        """An explicit order_by() marks the queryset ordered."""
        self.assertEqual(Annotation.objects.all().order_by('id').ordered, True)

    def test_order_by_extra(self):
        """Ordering supplied via extra() also counts as ordered."""
        self.assertEqual(Annotation.objects.all().extra(order_by=['id']).ordered, True)

    def test_annotated_ordering(self):
        """Annotating alone does not order; ordering by the annotation does."""
        qs = Annotation.objects.annotate(num_notes=Count('notes'))
        self.assertEqual(qs.ordered, False)
        self.assertEqual(qs.order_by('num_notes').ordered, True)
class SubqueryTests(TestCase):
    """Regression tests for sliced/ordered subselects (skipped on backends
    that cannot execute them)."""

    def setUp(self):
        DumbCategory.objects.create(id=1)
        DumbCategory.objects.create(id=2)
        DumbCategory.objects.create(id=3)

    def test_ordered_subselect(self):
        "Subselects honor any manual ordering"
        try:
            query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:2])
            self.assertEqual(set(query.values_list('id', flat=True)), set([2,3]))
            query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[:2])
            self.assertEqual(set(query.values_list('id', flat=True)), set([2,3]))
            query = DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[2:])
            self.assertEqual(set(query.values_list('id', flat=True)), set([1]))
        except DatabaseError:
            # Oracle and MySQL both have problems with sliced subselects.
            # This prevents us from even evaluating this test case at all.
            # Refs #10099
            self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries)

    def test_sliced_delete(self):
        "Delete queries can safely contain sliced subqueries"
        try:
            DumbCategory.objects.filter(id__in=DumbCategory.objects.order_by('-id')[0:1]).delete()
            self.assertEqual(set(DumbCategory.objects.values_list('id', flat=True)), set([1,2]))
        except DatabaseError:
            # Oracle and MySQL both have problems with sliced subselects.
            # This prevents us from even evaluating this test case at all.
            # Refs #10099
            self.assertFalse(connections[DEFAULT_DB_ALIAS].features.allow_sliced_subqueries)
class CloneTests(TestCase):
    """Regression tests for cloning already-evaluated querysets."""

    def test_evaluated_queryset_as_argument(self):
        "#13227 -- If a queryset is already evaluated, it can still be used as a query arg"
        n = Note(note='Test1', misc='misc')
        n.save()
        e = ExtraInfo(info='good', note=n)
        e.save()
        n_list = Note.objects.all()
        # Evaluate the Note queryset, populating the query cache.
        list(n_list)
        # Use the note queryset in a query, and evaluate that query in a way
        # that involves cloning.
        try:
            self.assertEqual(ExtraInfo.objects.filter(note__in=n_list)[0].info, 'good')
        except Exception:
            # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
            # are no longer swallowed; any ORM failure still fails the test.
            self.fail('Query should be clonable')
class EmptyQuerySetTests(TestCase):
    def test_emptyqueryset_values(self):
        # #14366 -- Calling .values() on an EmptyQuerySet and then cloning
        # that should not cause an error
        empty_values = Number.objects.none().values('num').order_by('num')
        self.assertQuerysetEqual(empty_values, [])

    def test_values_subquery(self):
        # Subqueries built from an empty queryset must match nothing,
        # whether built via values() or values_list().
        for inner in (Number.objects.none().values("pk"),
                      Number.objects.none().values_list("pk")):
            self.assertQuerysetEqual(
                Number.objects.filter(pk__in=inner), [])
class ValuesQuerysetTests(BaseQuerysetTest):
    def test_flat_values_lits(self):
        # Re-calling values_list() with flat=True on an existing
        # values_list() queryset must replace the previous projection.
        Number.objects.create(num=72)
        flat_qs = Number.objects.values_list("num").values_list(
            "num", flat=True)
        self.assertValueQuerysetEqual(flat_qs, [72])
class WeirdQuerysetSlicingTests(BaseQuerysetTest):
    def setUp(self):
        Number.objects.create(num=1)
        Number.objects.create(num=2)
        # Four articles; only names matter for these tests.
        for title in ('one', 'two', 'three', 'four'):
            Article.objects.create(name=title, created=datetime.datetime.now())

    def test_tickets_7698_10202(self):
        # People like to slice with '0' as the high-water mark.
        self.assertQuerysetEqual(Article.objects.all()[0:0], [])
        self.assertQuerysetEqual(Article.objects.all()[0:0][:10], [])
        self.assertEqual(Article.objects.all()[:0].count(), 0)
        # Once an empty slice is taken, the query cannot be re-sliced
        # or re-ordered.
        self.assertRaisesMessage(
            AssertionError,
            'Cannot change a query once a slice has been taken.',
            Article.objects.all()[:0].latest, 'created'
        )

    def test_empty_resultset_sql(self):
        # ticket #12192: an obviously-empty slice must not hit the database.
        self.assertNumQueries(0, lambda: list(Number.objects.all()[1:1]))
class EscapingTests(TestCase):
    def test_ticket_7302(self):
        # Reserved names are appropriately escaped
        ReservedName.objects.create(name='a', order=42)
        ReservedName.objects.create(name='b', order=37)
        expected = ['<ReservedName: b>', '<ReservedName: a>']
        # Ordering directly on the reserved column ...
        self.assertQuerysetEqual(
            ReservedName.objects.all().order_by('order'),
            expected
        )
        # ... and via an extra() select alias.
        self.assertQuerysetEqual(
            ReservedName.objects.extra(
                select={'stuff': 'name'}, order_by=('order', 'stuff')),
            expected
        )
class ToFieldTests(TestCase):
    def test_in_query(self):
        # Model instances can be used directly in an __in lookup on a
        # ForeignKey that uses to_field.
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Eaten.objects.filter(food__in=[apple, pear])),
            {lunch, dinner},
        )

    def test_reverse_in(self):
        # The reverse relation accepts instances in an __in lookup too.
        apple = Food.objects.create(name="apple")
        pear = Food.objects.create(name="pear")
        apple_meal = Eaten.objects.create(food=apple, meal="lunch")
        pear_meal = Eaten.objects.create(food=pear, meal="dinner")
        self.assertEqual(
            set(Food.objects.filter(eaten__in=[apple_meal, pear_meal])),
            {apple, pear},
        )

    def test_single_object(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        dinner = Eaten.objects.create(food=apple, meal="dinner")
        self.assertEqual(
            set(Eaten.objects.filter(food=apple)),
            {lunch, dinner},
        )

    def test_single_object_reverse(self):
        apple = Food.objects.create(name="apple")
        lunch = Eaten.objects.create(food=apple, meal="lunch")
        self.assertEqual(set(Food.objects.filter(eaten=lunch)), {apple})

    def test_recursive_fk(self):
        # Self-referential FK: filter children by parent instance.
        parent = Node.objects.create(num=42)
        child = Node.objects.create(num=1, parent=parent)
        self.assertEqual(list(Node.objects.filter(parent=parent)), [child])

    def test_recursive_fk_reverse(self):
        # ... and the reverse direction: find the parent of a child.
        parent = Node.objects.create(num=42)
        child = Node.objects.create(num=1, parent=parent)
        self.assertEqual(list(Node.objects.filter(node=child)), [parent])
class ConditionalTests(BaseQuerysetTest):
    """Tests whose execution depend on different environment conditions like
    Python version or DB backend features"""

    def setUp(self):
        generic = NamedCategory.objects.create(name="Generic")
        # A small tag tree: t1 -> (t2, t3), t3 -> (t4, t5).
        t1 = Tag.objects.create(name='t1', category=generic)
        Tag.objects.create(name='t2', parent=t1, category=generic)
        t3 = Tag.objects.create(name='t3', parent=t1)
        Tag.objects.create(name='t4', parent=t3)
        Tag.objects.create(name='t5', parent=t3)

    # In Python 2.6 beta releases, exceptions raised in __len__ are swallowed
    # (Python issue 1242657), so these cases return an empty list, rather than
    # raising an exception. Not a lot we can do about that, unfortunately, due
    # to the way Python handles list() calls internally. Thus, we skip the
    # tests for Python 2.6.
    @unittest.skipIf(sys.version_info[:2] == (2, 6), "Python version is 2.6")
    def test_infinite_loop(self):
        # If you're not careful, it's possible to introduce infinite loops via
        # default ordering on foreign keys in a cycle. We detect that.
        for looping_model in (LoopX, LoopZ):
            self.assertRaisesMessage(
                FieldError,
                'Infinite loop caused by ordering.',
                # Force queryset evaluation with list(); bind the loop
                # variable as a default so each lambda sees its own model.
                lambda model=looping_model: list(model.objects.all())
            )
        # Note that this doesn't cause an infinite loop, since the default
        # ordering on the Tag model is empty (and thus defaults to using "id"
        # for the related field).
        self.assertEqual(len(Tag.objects.order_by('parent')), 5)
        # ... but you can still order in a non-recursive fashion amongst
        # linked fields (the previous test failed because the default
        # ordering was recursive).
        self.assertQuerysetEqual(
            LoopX.objects.all().order_by('y__x__y__x__id'),
            []
        )

    # When grouping without specifying ordering, we add an explicit
    # "ORDER BY NULL" portion in MySQL to prevent unnecessary sorting.
    @skipUnlessDBFeature('requires_explicit_null_ordering_when_grouping')
    def test_null_ordering_added(self):
        query = Tag.objects.values_list('parent_id', flat=True).order_by().query
        query.group_by = ['parent_id']
        sql = query.get_compiler(DEFAULT_DB_ALIAS).as_sql()[0]
        fragment = "ORDER BY "
        pos = sql.find(fragment)
        # There must be exactly one ORDER BY clause ...
        self.assertEqual(sql.find(fragment, pos + 1), -1)
        # ... and it must order by NULL.
        self.assertEqual(sql.find("NULL", pos + len(fragment)),
                         pos + len(fragment))

    # Sqlite 3 does not support passing in more than 1000 parameters except by
    # changing a parameter at compilation time.
    @skipUnlessDBFeature('supports_1000_query_parameters')
    def test_ticket14244(self):
        # The "in" lookup must work with lists of 1000 items or more.
        Number.objects.all().delete()
        numbers = range(2500)
        for num in numbers:
            Number.objects.create(num=num)
        for size in (1000, 1001, 2000):
            self.assertEqual(
                Number.objects.filter(num__in=numbers[:size]).count(),
                size
            )
        self.assertEqual(Number.objects.filter(num__in=numbers).count(), 2500)
class UnionTests(unittest.TestCase):
    """
    Tests for the union of two querysets. Bug #12252.
    """
    def setUp(self):
        # Three As; Bs point at As; Cs point at both an A and a B.
        a_objs = []
        for name in ('one', 'two', 'three'):
            a = ObjectA(name=name)
            a.save()
            a_objs.append(a)
        b_objs = []
        b_specs = [('un', 1, a_objs[0]), ('deux', 2, a_objs[0]),
                   ('trois', 3, a_objs[2])]
        for name, number, objecta in b_specs:
            b = ObjectB(name=name, num=number, objecta=objecta)
            b.save()
            b_objs.append(b)
        c_specs = [('ein', a_objs[2], b_objs[2]),
                   ('zwei', a_objs[1], b_objs[1])]
        for name, objecta, objectb in c_specs:
            c = ObjectC(name=name, objecta=objecta, objectb=objectb)
            c.save()

    def check_union(self, model, Q1, Q2):
        # OR-ing two filtered querysets must match a single filter on
        # Q1 | Q2, regardless of operand order.
        combined = set(model.objects.filter(Q1 | Q2))
        self.assertEqual(
            set(model.objects.filter(Q1) | model.objects.filter(Q2)),
            combined)
        self.assertEqual(
            set(model.objects.filter(Q2) | model.objects.filter(Q1)),
            combined)

    def test_A_AB(self):
        self.check_union(ObjectA, Q(name='two'), Q(objectb__name='deux'))

    def test_A_AB2(self):
        self.check_union(ObjectA, Q(name='two'),
                         Q(objectb__name='deux', objectb__num=2))

    def test_AB_ACB(self):
        self.check_union(ObjectA, Q(objectb__name='deux'),
                         Q(objectc__objectb__name='deux'))

    def test_BAB_BAC(self):
        self.check_union(ObjectB, Q(objecta__objectb__name='deux'),
                         Q(objecta__objectc__name='ein'))

    def test_BAB_BACB(self):
        self.check_union(ObjectB, Q(objecta__objectb__name='deux'),
                         Q(objecta__objectc__objectb__name='trois'))

    def test_BA_BCA__BAB_BAC_BCA(self):
        q1 = Q(objecta__name='one', objectc__objecta__name='two')
        q2 = Q(objecta__objectc__name='ein', objectc__objecta__name='three',
               objecta__objectb__name='trois')
        self.check_union(ObjectB, q1, q2)
| mit |
dicehub/dice_tools | dice_tools/helpers/xmodel/_model.py | 1 | 13569 | from dice_tools import wizard, DICEObject, diceCall
from ._selection import ModelSelection
import weakref
__all__ = [
'Model'
]
class Model(DICEObject):
    """This type exports model to DICE QML and provides several
    features like item selection, item iterators etc.
    Model can represent simple lists and data with complex tree hierarchy.
    Implementation notes:
    * Every item identified by python's object identity function id.
    * Items can be repeated in model, but selection and current_item
    will work with some issues: selection will select all found duplicates
    and current item moves to first found item in model.
    * Items move has some limitations now, see method comment.
    * Every action is sent to dice in asynchronous mode.
    """
    def __init__(self, model_data, **kwargs):
        """Initialize the model and export it to DICE.

        Args:
            model_data (ModelData): data source whose items, roles and
                methods are exposed to DICE QML
        """
        super().__init__(base_type = 'BaseModel', **kwargs)
        self.__data = None
        self.setup(model_data)
    def connected(self):
        # Re-export the whole model state after (re)connecting to DICE:
        # reset the remote model, then replay every subtree that the view
        # had already fetched locally.
        super().connected()
        root_item_id = id(self.__data.root_item)
        roles = self.__data.model_roles
        methods = self.__data.model_methods
        self.x_model_reset(root_item_id, roles, methods, mode=1)
        def fill(item):
            # Only previously-fetched items (and their children) are re-sent.
            if item in self.__fetched:
                self.__model_insert_children(item, mode=1)
                for v in self.__data.elements(item):
                    fill(v)
        fill(self.__data.root_item)
    def setup(self, model_data):
        """Resets the model and initializes it to use model data passed in arguments.
        Args:
            model_data (ModelData): Model data, which will be exported to DICE
        Returns:
            None
        """
        if self.__data:
            # Detach from the previous data source before replacing it.
            wizard.unsubscribe(self, self.__data.root_item)
            wizard.unsubscribe(self, self.__data)
        # __current mimics a dead weakref: calling it yields None.
        self.__current = lambda: None
        self.__selection = ModelSelection(self)
        self.__data = model_data
        # Items whose children have already been sent to DICE.
        self.__fetched = weakref.WeakSet()
        # id(item) -> item; weak values so the model never keeps items alive.
        self.__items = weakref.WeakValueDictionary()
        root_item_id = id(self.__data.root_item)
        self.__items[root_item_id] = self.__data.root_item
        wizard.subscribe(self, self.__data)
        wizard.subscribe(self, self.__data.root_item)
        wizard.subscribe(self, self.__selection)
        roles = self.__data.model_roles
        methods = self.__data.model_methods
        self.x_model_reset(root_item_id, roles, methods, callback=None)
    @property
    def root_elements(self):
        """Contains elements of root item in model. Using
        this property simplifies access to model items when
        model represents simple item list.
        Returns:
            ModelElements: root item elements
        """
        return self.__data.elements(self.__data.root_item)
    @property
    def selection(self):
        """Current selection. Selection is set-like object of type
        ModelSelection, and it contains selected items.
        Returns:
            ModelSelection: current selection
        """
        return self.__selection
    @property
    def root_item(self):
        """Root item of this model
        Returns:
            Root item, type depend on model data implementation
        """
        return self.__data.root_item
    @property
    def data(self):
        """This property holds model data. Model data provides items,
        roles and methods of model items. Model data always has root
        item. Methods, related to data modifications, implemented in
        model data too.
        Returns:
            Instance of ModelData
        """
        return self.__data
    @property
    def current_item(self):
        """Current item represents current cursor in model.
        Returns:
            Current item, or None when no item is current
        """
        return self.__current()
    @current_item.setter
    def current_item(self, item):
        """Sets cursor to item.
        Args:
            item: Model item
        Returns:
            None
        """
        # if self.__current() != item:
        # NOTE(review): '== None' may invoke a custom __eq__ on item;
        # 'is None' would be the safer identity test — confirm before changing.
        if item == None:
            self.__current = lambda: None
            # No current item: point the remote cursor at the root.
            self.x_set_current(id(self.__data.root_item), callback=None)
        else:
            self.__current = weakref.ref(item)
            item_id = id(item)
            # Only notify DICE about items it already knows about.
            if item_id in self.__items:
                self.x_set_current(item_id, callback=None)
    def __iter__(self):
        # Depth-first traversal over every item reachable from the root.
        def walk(p):
            items = self.__data.elements(p)
            if items:
                for v in items:
                    yield v
                    for vv in walk(v):
                        yield vv
        return walk(self.__data.root_item)
    def elements_of(self, type):
        """This method returns generator, which yields model
        items filtered by type.
        Args:
            type: Type to filter items
        Returns:
            generator
        """
        for v in self:
            if isinstance(v, type):
                yield v
    # wizard handlers: called when model data or selection changes locally
    def w_model_select_item(self, selection, item):
        # print('select', item)
        self.__model_select_item(item)
    def w_model_deselect_item(self, item):
        self.__model_deselect_item(item)
    def w_model_insert_items(self, parent, row=0, count=None):
        self.__model_insert_children(parent, row, count)
    def w_model_remove_items(self, parent, row=0, count=None):
        self.__model_remove_children(parent, row, count)
    def w_model_update_item(self, item):
        self.__model_update_item(item)
    def w_model_move_items(self, model_data, source, source_row, count, dest,
                           dest_row):
        self.__model_move_items(source, source_row, count, dest, dest_row)
    def w_model_set_current(self, item):
        self.current_item = item
    # private methods
    def __model_move_items(self, source, source_row, count, dest, dest_row):
        # model items move allowed only if items exists in DICE
        source_fetched = source in self.__fetched
        dest_fetched = dest in self.__fetched
        if not source_fetched and not dest_fetched:
            return
        if not source_fetched:
            # Source subtree unknown to DICE: surfaces as a plain insert.
            self.__model_insert_children(dest, dest_row, count)
        elif not dest_fetched:
            # Destination unknown to DICE: surfaces as a plain removal.
            self.__model_remove_children(source, source_row, count)
        else:
            self.x_model_move_items(id(source), source_row, count,
                                    id(dest), dest_row, callback=None)
    def __model_insert_children(self, item, row=0, count=None, mode=0):
        # do not send data if item is not expanded and has no children in DICE
        if item in self.__fetched:
            children = self.__data.elements(item)
            # count=None means "everything from row to the end".
            count = count if count != None else len(children) - row
            data = []
            for i in range(row, row + count):
                child = children[i]
                # subscribe on new item to get wizard events about it
                wizard.subscribe(self, child)
                child_id = id(child)
                # remember item for future identification by ID
                self.__items[child_id] = child
                params = dict(row=i,
                              roles=self.__data.roles(child),
                              item_id=child_id)
                # query model data for child items
                if self.__data.elements(child) != None:
                    params['can_have_children'] = True
                # is item in selection
                if child in self.__selection:
                    params['selected'] = True
                # is item is current
                if self.__current() == child:
                    params['current'] = True
                data.append(params)
            # send data about new items
            self.x_model_insert_items(id(item), data, mode=mode)
    def __model_remove_children(self, item, row=0, count=None):
        # send data only if items exists in DICE (item's parent expanded)
        if item in self.__fetched:
            children = self.__data.elements(item)
            count = count if count != None else len(children) - row
            if count > 0:
                self.x_model_remove_items(id(item), row, count)
    def __model_update_item(self, item, row=None, count=1):
        item_id = id(item)
        # update only if item exists in DICE (item's parent expanded)
        if item_id in self.__items:
            self.x_model_update_item(item_id, self.__data.roles(item))
    def __model_select_item(self, item):
        item_id = id(item)
        # select only if item exists in DICE (item's parent expanded)
        if item_id in self.__items:
            self.x_model_select([item_id], True, False)
            wizard.w_model_selection_changed(self, [item], [])
    def __model_deselect_item(self, item):
        item_id = id(item)
        # select only if item exists in DICE (item's parent expanded)
        if item_id in self.__items:
            self.x_model_select([item_id], False, False)
            wizard.w_model_selection_changed(self, [], [item])
    # notifications: callbacks invoked from the DICE/QML side
    def n_model_fetch_items(self, item_id):
        """Model items fetched dynamically when model item
        expanded in view. Root item expanded when model
        view becomes visible.
        Args:
            item_id: ID of item to fetch
        Returns:
            None
        """
        item = self.__items.get(item_id)
        if item and item not in self.__fetched:
            # Mark fetched first so __model_insert_children sends the data.
            self.__fetched.add(item)
            self.__model_insert_children(item)
    def n_model_current_changed(self, cur_item_id, prev_item_id, modifiers):
        """Callback on cursor move. In QML cursor can be moved
        by selection model, which can be accessed by 'selection'
        property of model in QML.
        Args:
            cur_item_id: ID of item under cursor.
            prev_item_id: ID of previous item ID.
            modifiers: Keyboard modifiers
        Returns:
            None
        """
        current = self.__items.get(cur_item_id)
        prev = self.__items.get(prev_item_id)
        if current == None:
            self.__current = lambda: None
        else:
            self.__current = weakref.ref(current)
        wizard.w_model_current_changed(self, current, prev)
    def n_model_change_selection(self, selected, deselected):
        """Selection change notification
        Args:
            selected (list): IDs of selected items
            deselected (list): IDs of deselected items
        Returns:
            None
        """
        selected_items = []
        deselected_items = []
        # unsubscribe from selection to skip selection notification
        wizard.unsubscribe(self, self.__selection)
        for item_id in selected:
            item = self.__items.get(item_id)
            if item and item not in self.__selection:
                selected_items.append(item)
                self.__selection.add(item)
        for item_id in deselected:
            item = self.__items.get(item_id)
            if item and item in self.__selection:
                deselected_items.append(item)
                self.__selection.discard(item)
        # subscribe back
        wizard.subscribe(self, self.__selection)
        wizard.w_model_selection_changed(self, selected_items, deselected_items)
    def n_model_set_item_data(self, item_id, role, value):
        """Called when item role was modified
        Args:
            item_id: ID of item
            role (str): Role name
            value: New value
        Returns:
            None
        """
        item = self.__items.get(item_id)
        if item:
            self.__data.set_data(item, role, value)
    def n_model_call_item_method(self, item_id, method, args, kwargs):
        """Called when model wants call item's method
        Args:
            item_id: ID of item
            method (str): Method name to call
            args (list): Arguments for method call
            kwargs (dict): Keyword arguments for method call
        Returns:
            Result of the invoked method, or None if the item is unknown
        """
        item = self.__items.get(item_id)
        if item:
            return self.__data.call(item, method, args, kwargs)
    def n_model_move_items(self, source_id, source_row, count, dest_id,
                           dest_row):
        """Called when model queries for items move
        Args:
            source_id: ID of parent
            source_row: Starting index in parent's elements
            count: Number of elements for move
            dest_id: ID of new parent for elements
            dest_row: Index to insert elements
        Returns:
            None
        """
        source = self.__items.get(source_id)
        dest = self.__items.get(dest_id)
        if source and dest:
            self.__data.move(source, source_row, count, dest, dest_row)
    # NOTE(review): the x_* methods below are remote-call stubs; the diceCall
    # decorator appears to supply the actual dispatch to DICE (they are
    # invoked with arguments despite the bare signatures) — confirm in
    # dice_tools before relying on these bodies.
    @diceCall
    def x_model_reset(self):
        pass
    @diceCall
    def x_set_current(self):
        pass
    @diceCall
    def x_model_move_items(self):
        pass
    @diceCall
    def x_model_insert_items(self):
        pass
    @diceCall
    def x_model_remove_items(self):
        pass
    @diceCall
    def x_model_update_item(self):
        pass
    @diceCall
    def x_model_select(self):
        pass
noironetworks/neutron | neutron/agent/l3/extensions/port_forwarding.py | 2 | 19998 | # Copyright 2018 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import netaddr
from oslo_concurrency import lockutils
from oslo_log import log as logging
from neutron.agent.linux import ip_lib
from neutron.api.rpc.callbacks.consumer import registry
from neutron.api.rpc.callbacks import events
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import resources_rpc
from neutron.common import constants
from neutron.common import rpc as n_rpc
from neutron_lib.agent import l3_extension
from neutron_lib import constants as lib_consts
LOG = logging.getLogger(__name__)
DEFAULT_PORT_FORWARDING_CHAIN = 'fip-pf'
PORT_FORWARDING_PREFIX = 'fip_portforwarding-'
PORT_FORWARDING_CHAIN_PREFIX = 'pf-'
class RouterFipPortForwardingMapping(object):
    """Bookkeeping for the port forwardings managed by this agent.

    Three indexes are kept consistent:
      * managed_port_forwardings: pf_id -> port forwarding object
      * fip_port_forwarding: floating-ip id -> set of pf ids
      * router_fip_mapping: router id -> set of floating-ip ids
    """

    def __init__(self):
        self.managed_port_forwardings = {}
        # fip_port_forwarding = {
        #     fip_id_1: set(pf_id1, pf_id2),
        #     fip_id_2: set(pf_id3, pf_id4)
        # }
        self.fip_port_forwarding = collections.defaultdict(set)
        # router_fip_mapping = {
        #     router_id_1: set(fip_id_1, fip_id_2),
        #     router_id_2: set(fip_id_3, fip_id_4)
        # }
        self.router_fip_mapping = collections.defaultdict(set)

    def set_port_forwardings(self, port_forwardings):
        """Register every port forwarding under its floating IP and router."""
        for pf in port_forwardings:
            self.set_fip_port_forwarding(pf.floatingip_id, pf, pf.router_id)

    def update_port_forwardings(self, port_forwardings):
        """Replace the stored objects for already-indexed port forwardings."""
        for pf in port_forwardings:
            self.managed_port_forwardings[pf.id] = pf

    def get_port_forwarding(self, port_forwarding_id):
        """Return the managed port forwarding object, or None if unknown."""
        return self.managed_port_forwardings.get(port_forwarding_id)

    def del_port_forwardings(self, port_forwardings):
        """Forget port forwardings, pruning index entries that empty out."""
        for pf in port_forwardings:
            if not self.get_port_forwarding(pf.id):
                # Not managed by this agent; nothing to clean up.
                continue
            del self.managed_port_forwardings[pf.id]
            remaining = self.fip_port_forwarding[pf.floatingip_id]
            remaining.remove(pf.id)
            if not remaining:
                # Last forwarding on this floating IP: drop the fip entry
                # and detach the fip from its router.
                del self.fip_port_forwarding[pf.floatingip_id]
                router_fips = self.router_fip_mapping[pf.router_id]
                router_fips.remove(pf.floatingip_id)
                if not router_fips:
                    del self.router_fip_mapping[pf.router_id]

    def set_fip_port_forwarding(self, fip_id, pf, router_id):
        """Index a single port forwarding under its floating IP and router."""
        self.router_fip_mapping[router_id].add(fip_id)
        self.fip_port_forwarding[fip_id].add(pf.id)
        self.managed_port_forwardings[pf.id] = pf

    def clear_by_fip(self, fip_id, router_id):
        """Drop every port forwarding attached to the given floating IP."""
        self.router_fip_mapping[router_id].remove(fip_id)
        if not self.router_fip_mapping[router_id]:
            del self.router_fip_mapping[router_id]
        for pf_id in self.fip_port_forwarding[fip_id]:
            del self.managed_port_forwardings[pf_id]
        del self.fip_port_forwarding[fip_id]

    def check_port_forwarding_changes(self, new_pf):
        """Return True when new_pf differs from the stored version."""
        return self.get_port_forwarding(new_pf.id) != new_pf
class PortForwardingAgentExtension(l3_extension.L3AgentExtension):
SUPPORTED_RESOURCE_TYPES = [resources.PORTFORWARDING]
def initialize(self, connection, driver_type):
self.resource_rpc = resources_rpc.ResourcesPullRpcApi()
self._register_rpc_consumers()
self.mapping = RouterFipPortForwardingMapping()
def _register_rpc_consumers(self):
registry.register(self._handle_notification,
resources.PORTFORWARDING)
self._connection = n_rpc.Connection()
endpoints = [resources_rpc.ResourcesPushRpcCallback()]
topic = resources_rpc.resource_type_versioned_topic(
resources.PORTFORWARDING)
self._connection.create_consumer(topic, endpoints, fanout=True)
self._connection.consume_in_threads()
def consume_api(self, agent_api):
self.agent_api = agent_api
@lockutils.synchronized('port-forwarding')
def _handle_notification(self, context, resource_type,
forwardings, event_type):
for forwarding in forwardings:
self._process_port_forwarding_event(
context, forwarding, event_type)
def _store_local(self, pf_objs, event_type):
if event_type == events.CREATED:
self.mapping.set_port_forwardings(pf_objs)
elif event_type == events.UPDATED:
self.mapping.update_port_forwardings(pf_objs)
elif event_type == events.DELETED:
self.mapping.del_port_forwardings(pf_objs)
def _get_fip_rules(self, port_forward, wrap_name):
chain_rule_list = []
pf_chain_name = self._get_port_forwarding_chain_name(port_forward.id)
chain_rule_list.append((DEFAULT_PORT_FORWARDING_CHAIN,
'-j %s-%s' %
(wrap_name, pf_chain_name)))
floating_ip_address = str(port_forward.floating_ip_address)
protocol = port_forward.protocol
internal_ip_address = str(port_forward.internal_ip_address)
internal_port = port_forward.internal_port
external_port = port_forward.external_port
chain_rule = (pf_chain_name,
'-d %s/32 -p %s -m %s --dport %s '
'-j DNAT --to-destination %s:%s' % (
floating_ip_address, protocol, protocol,
external_port, internal_ip_address,
internal_port))
chain_rule_list.append(chain_rule)
return chain_rule_list
def _rule_apply(self, iptables_manager, port_forwarding, rule_tag):
iptables_manager.ipv4['nat'].clear_rules_by_tag(rule_tag)
if DEFAULT_PORT_FORWARDING_CHAIN not in iptables_manager.ipv4[
'nat'].chains:
self._install_default_rules(iptables_manager)
for chain, rule in self._get_fip_rules(
port_forwarding, iptables_manager.wrap_name):
if chain not in iptables_manager.ipv4['nat'].chains:
iptables_manager.ipv4['nat'].add_chain(chain)
iptables_manager.ipv4['nat'].add_rule(chain, rule, tag=rule_tag)
def _process_create(self, port_forwardings, ri, interface_name, namespace,
iptables_manager):
if not port_forwardings:
return
device = ip_lib.IPDevice(interface_name, namespace=namespace)
is_distributed = ri.router.get('distributed')
ha_port = ri.router.get(lib_consts.HA_INTERFACE_KEY, None)
fip_statuses = {}
for port_forwarding in port_forwardings:
# check if the port forwarding is managed in this agent from
# OVO and router rpc.
if port_forwarding.id in self.mapping.managed_port_forwardings:
LOG.debug("Skip port forwarding %s for create, as it had been "
"managed by agent", port_forwarding.id)
continue
existing_cidrs = ri.get_router_cidrs(device)
fip_ip = str(port_forwarding.floating_ip_address)
fip_cidr = str(netaddr.IPNetwork(fip_ip))
status = ''
if fip_cidr not in existing_cidrs:
try:
if not is_distributed:
fip_statuses[port_forwarding.floatingip_id] = (
ri.add_floating_ip(
{'floating_ip_address': fip_ip},
interface_name, device))
else:
if not ha_port:
device.addr.add(fip_cidr)
ip_lib.send_ip_addr_adv_notif(namespace,
interface_name,
fip_ip)
else:
ri._add_vip(fip_cidr, interface_name)
status = lib_consts.FLOATINGIP_STATUS_ACTIVE
except Exception:
# Any error will causes the fip status to be set 'ERROR'
status = lib_consts.FLOATINGIP_STATUS_ERROR
LOG.warning("Unable to configure floating IP %(fip_id)s "
"for port forwarding %(pf_id)s",
{'fip_id': port_forwarding.floatingip_id,
'pf_id': port_forwarding.id})
else:
if not ha_port:
ip_lib.send_ip_addr_adv_notif(namespace,
interface_name,
fip_ip)
if status:
fip_statuses[port_forwarding.floatingip_id] = status
if ha_port and ha_port['status'] == lib_consts.PORT_STATUS_ACTIVE:
ri.enable_keepalived()
for port_forwarding in port_forwardings:
rule_tag = PORT_FORWARDING_PREFIX + port_forwarding.id
self._rule_apply(iptables_manager, port_forwarding, rule_tag)
iptables_manager.apply()
self._sending_port_forwarding_fip_status(ri, fip_statuses)
self._store_local(port_forwardings, events.CREATED)
def _sending_port_forwarding_fip_status(self, ri, statuses):
if not statuses:
return
LOG.debug('Sending Port Forwarding floating ip '
'statuses: %s', statuses)
# Update floating IP status on the neutron server
ri.agent.plugin_rpc.update_floatingip_statuses(
ri.agent.context, ri.router_id, statuses)
def _get_resource_by_router(self, ri):
is_distributed = ri.router.get('distributed')
ex_gw_port = ri.get_ex_gw_port()
if not is_distributed:
interface_name = ri.get_external_device_interface_name(ex_gw_port)
namespace = ri.ns_name
iptables_manager = ri.iptables_manager
else:
interface_name = ri.get_snat_external_device_interface_name(
ex_gw_port)
namespace = ri.snat_namespace.name
iptables_manager = ri.snat_iptables_manager
return interface_name, namespace, iptables_manager
def _check_if_need_process(self, ri, force=False):
# force means the request comes from, if True means it comes from OVO,
# as we get a actually port forwarding object, then we need to check in
# the following steps. But False, means it comes from router rpc.
if not ri or not ri.get_ex_gw_port() or (
not force and not ri.fip_managed_by_port_forwardings):
# agent doesn't hold the router. pass
# This router doesn't own a gw port. pass
# This router doesn't hold a port forwarding mapping. pass
return False
is_distributed = ri.router.get('distributed')
agent_mode = ri.agent_conf.agent_mode
if (is_distributed and
agent_mode in [lib_consts.L3_AGENT_MODE_DVR_NO_EXTERNAL,
lib_consts.L3_AGENT_MODE_DVR]):
# just support centralized cases
return False
return True
def _process_port_forwarding_event(self, context, port_forwarding,
event_type):
router_id = port_forwarding.router_id
ri = self._get_router_info(router_id)
if not self._check_if_need_process(ri, force=True):
return
(interface_name, namespace,
iptables_manager) = self._get_resource_by_router(ri)
if event_type == events.CREATED:
self._process_create(
[port_forwarding], ri, interface_name, namespace,
iptables_manager)
elif event_type == events.UPDATED:
self._process_update([port_forwarding], iptables_manager,
interface_name, namespace)
elif event_type == events.DELETED:
self._process_delete(
context, [port_forwarding], ri, interface_name, namespace,
iptables_manager)
def _process_update(self, port_forwardings, iptables_manager,
interface_name, namespace):
if not port_forwardings:
return
device = ip_lib.IPDevice(interface_name, namespace=namespace)
for port_forwarding in port_forwardings:
# check if port forwarding change from OVO and router rpc
if not self.mapping.check_port_forwarding_changes(port_forwarding):
LOG.debug("Skip port forwarding %s for update, as there is no "
"difference between the memory managed by agent",
port_forwarding.id)
continue
current_chain = self._get_port_forwarding_chain_name(
port_forwarding.id)
iptables_manager.ipv4['nat'].remove_chain(current_chain)
ori_pf = self.mapping.managed_port_forwardings[port_forwarding.id]
device.delete_socket_conntrack_state(
str(ori_pf.floating_ip_address), ori_pf.external_port,
protocol=ori_pf.protocol)
rule_tag = PORT_FORWARDING_PREFIX + port_forwarding.id
self._rule_apply(iptables_manager, port_forwarding, rule_tag)
iptables_manager.apply()
self._store_local(port_forwardings, events.UPDATED)
def _process_delete(self, context, port_forwardings, ri, interface_name,
namespace, iptables_manager):
if not port_forwardings:
return
device = ip_lib.IPDevice(interface_name, namespace=namespace)
for port_forwarding in port_forwardings:
current_chain = self._get_port_forwarding_chain_name(
port_forwarding.id)
iptables_manager.ipv4['nat'].remove_chain(current_chain)
fip_address = str(port_forwarding.floating_ip_address)
device.delete_socket_conntrack_state(
fip_address, port_forwarding.external_port,
protocol=port_forwarding.protocol)
iptables_manager.apply()
fip_id_cidrs = set([(pf.floatingip_id,
str(pf.floating_ip_address)) for pf in
port_forwardings])
self._sync_and_remove_fip(context, fip_id_cidrs, device, ri)
self._store_local(port_forwardings, events.DELETED)
def _sync_and_remove_fip(self, context, fip_id_cidrs, device, ri):
if not fip_id_cidrs:
return
ha_port = ri.router.get(lib_consts.HA_INTERFACE_KEY)
fip_ids = [item[0] for item in fip_id_cidrs]
pfs = self.resource_rpc.bulk_pull(context, resources.PORTFORWARDING,
filter_kwargs={
'floatingip_id': fip_ids})
exist_fips = set()
fip_status = {}
for pf in pfs:
exist_fips.add(pf.floatingip_id)
for fip_id_cidr in fip_id_cidrs:
if fip_id_cidr[0] not in exist_fips:
if ha_port:
ri._remove_vip(fip_id_cidr[1])
else:
device.delete_addr_and_conntrack_state(fip_id_cidr[1])
fip_status[fip_id_cidr[0]] = 'DOWN'
if ha_port:
ri.enable_keepalived()
self._sending_port_forwarding_fip_status(ri, fip_status)
for fip_id in fip_status.keys():
self.mapping.clear_by_fip(fip_id, ri.router_id)
def _get_router_info(self, router_id):
router_info = self.agent_api.get_router_info(router_id)
if router_info:
return router_info
LOG.debug("Router %s is not managed by this agent. "
"It was possibly deleted concurrently.", router_id)
def _get_port_forwarding_chain_name(self, pf_id):
chain_name = PORT_FORWARDING_CHAIN_PREFIX + pf_id
return chain_name[:constants.MAX_IPTABLES_CHAIN_LEN_WRAP]
def _install_default_rules(self, iptables_manager):
default_rule = '-j %s-%s' % (iptables_manager.wrap_name,
DEFAULT_PORT_FORWARDING_CHAIN)
iptables_manager.ipv4['nat'].add_chain(DEFAULT_PORT_FORWARDING_CHAIN)
iptables_manager.ipv4['nat'].add_rule('PREROUTING', default_rule)
iptables_manager.apply()
def check_local_port_forwardings(self, context, ri, fip_ids):
    """Reconcile this router's port forwardings with the server's view.

    Pulls the port forwardings attached to *fip_ids* over RPC, classifies
    each against the locally managed set, then applies update, create and
    delete processing in that order.

    :param context: RPC context.
    :param ri: RouterInfo of the router being processed.
    :param fip_ids: floating IP ids whose port forwardings should be synced.
    """
    pfs = self.resource_rpc.bulk_pull(context, resources.PORTFORWARDING,
                                      filter_kwargs={
                                          'floatingip_id': fip_ids})
    (interface_name, namespace,
     iptable_manager) = self._get_resource_by_router(ri)
    local_pfs = set(self.mapping.managed_port_forwardings.keys())
    new_pfs = []
    updated_pfs = []
    current_pfs = set()
    for pf in pfs:
        # check the request port forwardings, and split them into
        # update, new, current part from router rpc
        if pf.id in self.mapping.managed_port_forwardings:
            if self.mapping.check_port_forwarding_changes(pf):
                updated_pfs.append(pf)
        else:
            new_pfs.append(pf)
        current_pfs.add(pf.id)
    # Anything managed locally but no longer reported by the server has
    # been deleted and must be torn down.
    remove_pf_ids_set = local_pfs - current_pfs
    remove_pfs = [self.mapping.managed_port_forwardings[pf_id]
                  for pf_id in remove_pf_ids_set]
    self._process_update(updated_pfs, iptable_manager,
                         interface_name, namespace)
    self._process_create(new_pfs, ri, interface_name,
                         namespace, iptable_manager)
    self._process_delete(context, remove_pfs, ri, interface_name,
                         namespace, iptable_manager)
def process_port_forwarding(self, context, data):
    """Sync port forwardings for the router described by *data*."""
    router_info = self._get_router_info(data['id'])
    if self._check_if_need_process(router_info):
        self.check_local_port_forwardings(
            context, router_info,
            router_info.fip_managed_by_port_forwardings)
@lockutils.synchronized('port-forwarding')
def add_router(self, context, data):
    """Handle a router add event.

    Called on router create.

    :param context: RPC context.
    :param data: Router data.
    """
    self.process_port_forwarding(context, data)
@lockutils.synchronized('port-forwarding')
def update_router(self, context, data):
    """Handle a router update event.

    Called on router update.

    :param context: RPC context.
    :param data: Router data.
    """
    self.process_port_forwarding(context, data)
def delete_router(self, context, data):
    """Handle a router delete event.

    :param context: RPC context.
    :param data: Router data.
    """
    # Intentionally a no-op: no per-port-forwarding cleanup is done here.
    pass
def ha_state_change(self, context, data):
    # No port-forwarding-specific work is performed on HA state transitions.
    pass
| apache-2.0 |
eharney/nova | nova/tests/integrated/integrated_helpers.py | 12 | 5398 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Provides common functionality for integrated unit tests
"""
import random
import string
import uuid
from oslo.config import cfg
import nova.image.glance
from nova.openstack.common import log as logging
from nova import service
from nova import test
from nova.tests import cast_as_call
from nova.tests import fake_crypto
import nova.tests.image.fake
from nova.tests.integrated.api import client
# Global config/logger handles for this test module.
# Fix: CONF was assigned twice (duplicate `CONF = cfg.CONF`); keep one.
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
# Register the cells manager option so CONF.cells.manager resolves before
# the cells service is started in _IntegratedTestBase._setup_services().
CONF.import_opt('manager', 'nova.cells.opts', group='cells')
def generate_random_alphanumeric(length):
    """Create a random string of uppercase letters and digits of the given length."""
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(random.choice(alphabet) for _ in range(length))
def generate_random_numeric(length):
    """Create a random string of decimal digits of the given length."""
    digits = string.digits
    return ''.join(random.choice(digits) for _ in range(length))
def generate_new_element(items, prefix, numeric=False):
    """Create a random prefixed string that does not collide with ``items``."""
    make_suffix = generate_random_numeric if numeric else generate_random_alphanumeric
    while True:
        candidate = prefix + make_suffix(8)
        if candidate not in items:
            return candidate
        LOG.debug("Random collision on %s" % candidate)
class _IntegratedTestBase(test.TestCase):
    """Base TestCase that boots the nova service stack plus the compute API.

    Subclasses are expected to provide ``_api_version``,
    ``_image_ref_parameter`` and ``_flavor_ref_parameter``.
    """

    def setUp(self):
        super(_IntegratedTestBase, self).setUp()
        f = self._get_flags()
        self.flags(**f)
        self.flags(verbose=True)
        self.flags(periodic_enable=False)
        # Swap in fakes so no real crypto or glance image service is needed.
        self.useFixture(test.ReplaceModule('crypto', fake_crypto))
        nova.tests.image.fake.stub_out_image_service(self.stubs)
        self.flags(scheduler_driver='nova.scheduler.'
                   'chance.ChanceScheduler')
        self._setup_services()
        self._start_api_service()
        self.api = self._get_test_client()
        # Make RPC casts synchronous so assertions observe their effects.
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

    def _setup_services(self):
        """Start every nova service the integration tests rely on."""
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.cert = self.start_service('cert')
        self.consoleauth = self.start_service('consoleauth')
        self.network = self.start_service('network')
        self.scheduler = self.start_service('scheduler')
        self.cells = self.start_service('cells', manager=CONF.cells.manager)

    def tearDown(self):
        # Stop the API service and reset the shared fake image service state.
        self.osapi.stop()
        nova.tests.image.fake.FakeImageService_reset()
        super(_IntegratedTestBase, self).tearDown()

    def _get_test_client(self):
        """Return an API client pointed at the test endpoint."""
        return client.TestOpenStackClient('fake', 'fake', self.auth_url)

    def _start_api_service(self):
        """Start the compute API WSGI service and record its auth URL."""
        self.osapi = service.WSGIService("osapi_compute")
        self.osapi.start()
        self.auth_url = 'http://%(host)s:%(port)s/%(api_version)s' % ({
            'host': self.osapi.host, 'port': self.osapi.port,
            'api_version': self._api_version})

    def _get_flags(self):
        """An opportunity to setup flags, before the services are started."""
        f = {}
        # Ensure tests only listen on localhost
        f['ec2_listen'] = '127.0.0.1'
        f['osapi_compute_listen'] = '127.0.0.1'
        f['metadata_listen'] = '127.0.0.1'
        # Auto-assign ports to allow concurrent tests
        f['ec2_listen_port'] = 0
        f['osapi_compute_listen_port'] = 0
        f['metadata_listen_port'] = 0
        f['fake_network'] = True
        return f

    def get_unused_server_name(self):
        """Return a server name not used by any currently existing server."""
        servers = self.api.get_servers()
        server_names = [server['name'] for server in servers]
        return generate_new_element(server_names, 'server')

    def get_invalid_image(self):
        """Return a random UUID that does not correspond to any image."""
        return str(uuid.uuid4())

    def _build_minimal_create_server_request(self):
        """Assemble the smallest valid server-create request body."""
        server = {}
        image = self.api.get_images()[0]
        LOG.debug("Image: %s" % image)
        if self._image_ref_parameter in image:
            image_href = image[self._image_ref_parameter]
        else:
            image_href = image['id']
            image_href = 'http://fake.server/%s' % image_href
        # We now have a valid imageId
        server[self._image_ref_parameter] = image_href
        # Set a valid flavorId
        flavor = self.api.get_flavors()[0]
        LOG.debug("Using flavor: %s" % flavor)
        server[self._flavor_ref_parameter] = ('http://fake.server/%s'
                                              % flavor['id'])
        # Set a valid server name
        server_name = self.get_unused_server_name()
        server['name'] = server_name
        return server
| apache-2.0 |
skeezix/compo4all | spaghetti-server/multiscore_handler.py | 1 | 2785 |
import logging
import modulemap
import paths
import singlescore_handler
logging.info ( "LOADING: multiscore_handler" )
def update_hi ( req ):
    """Merge an uploaded hi-score table into the per-player single scores.

    Compares each slot of the uploaded table against the game's template
    table; slots that differ from the template are treated as new scores
    and forwarded to singlescore_handler.update_hi().

    :param req: request dict; must contain 'gamename' and '_bindata'.
    """
    # how many slots in the table?
    n = modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].get_table_slots ( req )
    # keep a copy of bindata since get_hi is destructive right now .. why oh why?!
    bindata = req [ '_bindata' ]
    # get the template table bits..
    template = get_hi ( req )
    thi = list()
    for i in range ( n ):
        d = modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].get_table_slot_dict ( req, template, i )
        thi.append ( d )
        logging.debug ( "%s template slot %d is %s: %s" % ( req [ 'gamename' ], i, d [ 'shortname' ], d [ 'score' ] ) )
    req [ '_bindata' ] = bindata
    # with luck, we can parse out a single entries block and send that to single-handler; do this once per entry
    # in the hi table, that looks like a new entry
    # for each entry in table
    #   does it look like a new entry? is it already in the template?
    #   if new, send it to singleserver
    for i in range ( n ):
        d = modulemap.gamemap [ req [ 'gamename' ] ][ 'module' ].get_table_slot_dict ( req, bindata, i )
        found = False
        for t in range ( n ):
            if d [ 'score' ] == thi [ t ][ 'score' ] and d [ 'shortname' ] == thi [ t ][ 'shortname' ]:
                found = True
        if found == False:
            logging.debug ( "%s - incoming slot %d is %s: %s -> looks new" % ( req [ 'gamename' ], i, d [ 'shortname' ], d [ 'score' ] ) )
            singlescore_handler.update_hi ( req, int ( d [ 'score' ] ) )
        else:
            logging.debug ( "%s - incoming slot %d is %s: %s -> looks like looper" % ( req [ 'gamename' ], i, d [ 'shortname' ], d [ 'score' ] ) )
def get_hi(req):
    """Return the template hi-score file for the requested game.

    Loads the game's default (template) hi-score binary, stores it and its
    length on the request dict ('_bindata'/'_binlen'), and gives the game
    module's optional_prepare_template() hook a chance to adjust it.

    :param req: request dict; must contain 'gamename'.
    :returns: the raw template bytes.
    """
    bp = paths.templatepath(req)
    # 'with' guarantees the file is closed even if read() raises
    # (the original leaked the handle on error).
    with open(bp, "rb") as f:
        bindata = f.read()
    req['_bindata'] = bindata
    req['_binlen'] = len(bindata)
    logging.info("%s - pulled template hi file (len %s)" % (req['gamename'], req['_binlen']))
    # The hook is best-effort (many game modules don't define it), but don't
    # swallow KeyboardInterrupt/SystemExit like the old bare 'except:' did.
    try:
        modulemap.gamemap[req['gamename']]['module'].optional_prepare_template(req)
    except Exception:
        pass
    return bindata
# Thin delegations: multi-score games reuse the single-score storage,
# tally rendering and bookkeeping unchanged.
def get_json_tally ( req, raw=False ):
    return singlescore_handler.get_json_tally ( req, raw=raw )

def get_html_tally ( req ):
    return singlescore_handler.get_html_tally ( req )

def get_last_modify_epoch ( req ):
    return singlescore_handler.get_last_modify_epoch ( req )

def _read_tally ( req ):
    return singlescore_handler._read_tally ( req )

def done ( req ):
    return singlescore_handler.done ( req )
| gpl-2.0 |
VioletRed/plugin.video.youtube | resources/lib/kodion/utils/function_cache.py | 27 | 3150 | from functools import partial
import hashlib
import datetime
from storage import Storage
class FunctionCache(Storage):
    """Persistent memoization cache for function results.

    Results are stored via the Storage base class, keyed by an MD5 digest
    of the function's module, name and call arguments.
    """

    # Handy TTL constants, in seconds.
    ONE_MINUTE = 60
    ONE_HOUR = 60 * ONE_MINUTE
    ONE_DAY = 24 * ONE_HOUR
    ONE_WEEK = 7 * ONE_DAY
    ONE_MONTH = 4 * ONE_WEEK

    def __init__(self, filename, max_file_size_kb=-1):
        Storage.__init__(self, filename, max_file_size_kb=max_file_size_kb)
        # Caching is on by default; see enabled()/disable().
        self._enabled = True
        pass

    def clear(self):
        # Drop all cached entries from the underlying storage.
        self._clear()
        pass

    def enabled(self):
        """
        Enables the caching.
        NOTE(review): despite the name this is a setter, not a predicate;
        it mirrors disable() below.
        :return:
        """
        self._enabled = True
        pass

    def disable(self):
        """
        Disable caching e.g. for tests
        :return:
        """
        self._enabled = False
        pass

    def _create_id_from_func(self, partial_func):
        """
        Creates a cache id (MD5 hex digest) from the given partial function.
        NOTE(review): md5.update() is fed str objects directly, which only
        works on Python 2; Python 3 would require encoding to bytes first.
        :param partial_func: functools.partial wrapping the target function
        :return: id for the given function
        """
        m = hashlib.md5()
        m.update(partial_func.func.__module__)
        m.update(partial_func.func.__name__)
        m.update(str(partial_func.args))
        m.update(str(partial_func.keywords))
        return m.hexdigest()

    def _get_cached_data(self, partial_func):
        # Returns (stored_value_or_None, cache_id); stored values are
        # (data, timestamp) pairs written by get().
        cache_id = self._create_id_from_func(partial_func)
        return self._get(cache_id), cache_id

    def get_cached_only(self, func, *args, **keywords):
        # Return the cached result for func(*args, **keywords) without ever
        # calling the function; None when nothing is cached.
        partial_func = partial(func, *args, **keywords)
        # if caching is disabled call the function
        if not self._enabled:
            return partial_func()
        # only return before cached data
        data, cache_id = self._get_cached_data(partial_func)
        if data is not None:
            return data[0]
        return None

    def get(self, seconds, func, *args, **keywords):
        def _seconds_difference(_first, _last):
            # timedelta.total_seconds() only exists from Python 2.7 on.
            _delta = _last - _first
            return 24*60*60*_delta.days + _delta.seconds + _delta.microseconds/1000000.
        """
        Returns the cached data of the given function, calling and re-caching
        it when missing or older than the time-to-live.
        :param seconds: time to live in seconds
        :param func: function whose result should be cached
        :param args/keywords: arguments forwarded to func
        :return: cached or freshly computed result
        """
        partial_func = partial(func, *args, **keywords)
        # if caching is disabled call the function
        if not self._enabled:
            return partial_func()
        cached_data = None
        cached_time = None
        data, cache_id = self._get_cached_data(partial_func)
        if data is not None:
            cached_data = data[0]
            cached_time = data[1]
            pass
        diff_seconds = 0
        now = datetime.datetime.now()
        if cached_time is not None:
            # this is so stupid, but we have the function 'total_seconds' only starting with python 2.7
            diff_seconds = _seconds_difference(cached_time, now)
            pass
        if cached_data is None or diff_seconds > seconds:
            # Cache miss or stale entry: compute and store with a new timestamp.
            cached_data = partial_func()
            self._set(cache_id, cached_data)
            pass
        return cached_data
    pass
| gpl-2.0 |
vvw/linearAlgebra-coursera | assignment 6/factoring_lab/submit_factoring_lab.py | 3 | 12317 | # version code 988
######## ########
# Hi there, curious student. #
# #
# This submission script runs some tests on your #
# code and then uploads it to Coursera for grading. #
# #
# Changing anything in this script might cause your #
# submissions to fail. #
######## ########
import io, os, sys, doctest, traceback, importlib, urllib.request, urllib.parse, urllib.error, base64, hashlib, random, ast
SUBMIT_VERSION = "988"
URL = 'matrix-001'
part_friendly_names = ['Int 2 GF2', 'Make Vec', 'Find Candidates', 'Find a and b', 'Divisor of 2461799993978700679']
groups = [[('GiEBz32fRhO5iB2UE3SzSAIGgO1d1hW7', 'Int 2 GF2', '>>> from GF2 import one\n>>> print(test_format(int2GF2(1)))\n>>> print(test_format(int2GF2(12345)))\n>>> print(test_format(int2GF2(634)))\n>>> print(test_format(int2GF2(3441)))\n>>> print(test_format(int2GF2(0)))\n>>> print(test_format(int2GF2(45321)))\n')], [('GiEBz32fRhO5iB2UP17INIgvqBu0uBVy', 'Make Vec', '>>> from factoring_support import dumb_factor\n>>> l = {257, 2, 3, 277, 5, 7, 137, 11, 13, 271, 17, 19, 269, 149, 23, 281, 283, 29, 31, 163, 37, 167, 41, 43, 173, 47, 229, 179, 53, 59, 151, 61, 181, 193, 67, 197, 71, 73, 241, 79, 83, 139, 89, 199, 223, 263, 97, 227, 131, 101, 103, 233, 107, 109, 239, 157, 113, 211, 251, 293, 191, 127}\n>>> df = dumb_factor(7641, l)\n>>> print(test_format(make_Vec(l, df)))\n>>> print(test_format(l))\n>>> print(test_format(df))\n>>> print(test_format(make_Vec({2, 3, 5, 7}, [(2, 3), (3, 2)])))\n>>> print(test_format(make_Vec({2, 3, 5, 7, 137, 11, 13, 17, 19, 149, 23, 29, 31, 163, 37, 167, 41, 43, 173, 47, 179, 53, 59, 151, 61, 181, 193, 67, 197, 71, 73, 79, 83, 139, 89, 199, 97, 131, 101, 103, 107, 109, 157, 113, 191, 127}, dumb_factor(5432, {2, 3, 5, 7, 137, 11, 13, 17, 19, 149, 23, 29, 31, 163, 37, 167, 41, 43, 173, 47, 179, 53, 59, 151, 61, 181, 193, 67, 197, 71, 73, 79, 83, 139, 89, 199, 97, 131, 101, 103, 107, 109, 157, 113, 191, 127}))))\n')], [('GiEBz32fRhO5iB2UiyOmwC3BuYmiZfL0', 'Find Candidates', '>>> print(test_format(type(find_candidates(100, primes(7))) == tuple))\n>>> p = primes(32)\n>>> rowlist, roots = find_candidates(2419, p)\n>>> print(test_format((rowlist[:1+len(p)], roots[:1+len(p)])))\n>>> p = primes(30)\n>>> rowlist, roots = find_candidates(1234, p)\n>>> print(test_format((rowlist[:1+len(p)], roots[:1+len(p)])))\n>>> p = primes(50)\n>>> rowlist, roots = find_candidates(65231, p)\n>>> print(test_format((rowlist[:1+len(p)], roots[:1+len(p)])))\n')], [('GiEBz32fRhO5iB2UdVp7MX3aT4O0ULkn', 'Find a and b', '>>> v = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11},{0: 0, 1: one, 2: one, 4: 0, 5: one, 11: one})\n>>> N = 2419\n>>> roots = [51, 52, 53, 58, 61, 62, 63, 67, 68, 71, 77, 79]\n>>> print(test_format(find_a_and_b(v, roots, N)))\n>>> v = Vec({0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11},{0: 0, 1: 0, 10: one, 2: one})\n>>> N = 2419\n>>> roots = [51, 52, 53, 58, 61, 62, 63, 67, 68, 71, 77, 79]\n>>> print(test_format(find_a_and_b(v, roots, N)))\n')], [('GiEBz32fRhO5iB2UJvOtHoslL9WjB0uY', 'Divisor of 2461799993978700679', '>>> print(test_format(smallest_nontrivial_divisor_of_2461799993978700679))\n')]]
source_files = ['factoring_lab.py'] * len(sum(groups,[]))
try:
import factoring_lab as solution
test_vars = vars(solution).copy()
except Exception as exc:
print(exc)
print("!! It seems like you have an error in your stencil file. Please fix before submitting.")
sys.exit(1)
def find_lines(varname):
    # Return every line of the student's file that mentions *varname*.
    # NOTE(review): this reads "python_lab.py" although source_files submits
    # factoring_lab.py -- looks copied from another lab's submit script;
    # confirm which file the line checks should inspect.
    return list(filter(lambda l: varname in l, list(open("python_lab.py"))))
def find_line(varname):
    """Return the first student-file line mentioning *varname*, or None."""
    matches = find_lines(varname)
    if matches:
        return matches[0]
    return None
def use_comprehension(varname):
    """Return True if any line mentioning *varname* parses to a comprehension."""
    lines = find_lines(varname)
    for line in lines:
        try:
            if "comprehension" in ast.dump(ast.parse(line)):
                return True
        except: pass  # lines that don't parse standalone are skipped
    return False
def double_comprehension(varname):
    """Return True if the first line mentioning *varname* contains exactly
    two comprehensions (e.g. a nested comprehension).

    NOTE(review): find_line() returns None when nothing matches, which makes
    ast.parse() raise -- confirm callers guarantee at least one match.
    """
    line = find_line(varname)
    return ast.dump(ast.parse(line)).count("comprehension") == 2
def line_contains_substr(varname, word):
    """Return whether any student-file line mentioning *varname* contains *word*.

    Bug fix: the original called find_line() (a single string, or None) and
    then iterated over it -- walking characters, or raising TypeError on
    None.  The plural variable name shows find_lines() was intended.
    """
    lines = find_lines(varname)
    for line in lines:
        if word in line:
            return True
    return False
def test_format(obj, precision=6):
tf = lambda o: test_format(o, precision)
delimit = lambda o: ', '.join(o)
otype = type(obj)
if otype is str:
return "'%s'" % obj
elif otype is float or otype is int:
if otype is int:
obj = float(obj)
if -0.000001 < obj < 0.000001:
obj = 0.0
fstr = '%%.%df' % precision
return fstr % obj
elif otype is set:
if len(obj) == 0:
return 'set()'
return '{%s}' % delimit(sorted(map(tf, obj)))
elif otype is dict:
return '{%s}' % delimit(sorted(tf(k)+': '+tf(v) for k,v in obj.items()))
elif otype is list:
return '[%s]' % delimit(map(tf, obj))
elif otype is tuple:
return '(%s%s)' % (delimit(map(tf, obj)), ',' if len(obj) is 1 else '')
elif otype.__name__ in ['Vec','Mat']:
entries = tf({x:obj.f[x] for x in obj.f if tf(obj.f[x]) != tf(0)})
return '%s(%s, %s)' % (otype.__name__, test_format(obj.D), entries)
else:
return str(obj)
def output(tests):
    """Run the doctest-style *tests* in the student namespace; return raw outputs."""
    parsed = doctest.DocTestParser().get_doctest(tests, test_vars, 0, '<string>', 0)
    capture_runner = ModifiedDocTestRunner()
    capture_runner.run(parsed)
    return capture_runner.results
# Expose the grader helpers inside the namespace the doctests run in.
test_vars['test_format'] = test_vars['tf'] = test_format
test_vars['find_lines'] = find_lines
test_vars['find_line'] = find_line
test_vars['use_comprehension'] = use_comprehension
test_vars['double_comprehension'] = double_comprehension
test_vars['line_contains_substr'] = line_contains_substr
# Submission endpoint template and CLI defaults (overridden under __main__).
base_url = '://class.coursera.org/%s/assignment/' % URL
protocol = 'https'
colorize = False
verbose = False
class ModifiedDocTestRunner(doctest.DocTestRunner):
    """DocTestRunner that records raw outputs instead of checking them."""

    def __init__(self, *args, **kwargs):
        # Per-example outputs (or exception text), in execution order.
        self.results = []
        return super(ModifiedDocTestRunner, self).__init__(*args, checker=OutputAccepter(), **kwargs)

    def report_success(self, out, test, example, got):
        # Record whatever the example printed; acceptance is decided server-side.
        self.results.append(got)

    def report_unexpected_exception(self, out, test, example, exc_info):
        # Record only the final "ExceptionType: message" line of the traceback.
        exf = traceback.format_exception_only(exc_info[0], exc_info[1])[-1]
        self.results.append(exf)
class OutputAccepter(doctest.OutputChecker):
    """An OutputChecker that accepts any output; grading happens server-side."""

    def check_output(self, want, got, optionflags):
        """Report a match no matter what *want* and *got* contain."""
        return True
def submit(parts_string, login, password):
    """Run the selected assignment parts and upload their results.

    Missing arguments are prompted for interactively.

    :param parts_string: spec like '1,4-7' selecting parts, or falsy.
    :param login: Coursera account email, or falsy.
    :param password: one-time assignment password, or falsy.
    """
    print('= Coding the Matrix Homework and Lab Submission')
    if not login:
        login = login_prompt()
    if not password:
        password = password_prompt()
    if not parts_string:
        parts_string = parts_prompt()
    parts = parse_parts(parts_string)
    if not all([parts, login, password]):
        return
    for sid, name, part_tests in parts:
        sys.stdout.write('== Submitting "%s"' % name)
        if 'DEV' in os.environ: sid += '-dev'
        # Each part needs a fresh challenge salt from the server.
        (login, ch, state, ch_aux) = get_challenge(login, sid)
        if not all([login, ch, state]):
            print(' !! Error: %s\n' % login)
            return
        # to stop Coursera's strip() from doing anything, we surround in parens
        results = output(part_tests)
        prog_out = '(%s)' % ''.join(map(str.rstrip, results))
        token = challenge_response(login, password, ch)
        src = source(sid)
        feedback = submit_solution(login, token, sid, prog_out, src, state, ch_aux)
        if len(feedback.strip()) > 0:
            if colorize:
                # Green for accepted feedback, red when it mentions "incorrect".
                good = 'incorrect' not in feedback.lower()
                print(': \033[1;3%dm%s\033[0m' % (2 if good else 1, feedback.strip()))
            else:
                print(': %s' % feedback.strip())
            if verbose:
                # Interleave each '>>>' test line with the output it produced.
                res_itr = iter(results)
                for t in part_tests.split('\n'):
                    print(t)
                    if t[:3] == '>>>':
                        sys.stdout.write(next(res_itr))
                # print(part_tests)
                # print(results)
                # for t, r in zip(part_tests.split('\n>>>'), results):
                #     sys.stdout.write('>>> %s\n%s' % (t, r))
        sys.stdout.write('\n\n')
def login_prompt():
    """Ask interactively for the Coursera account email."""
    return input('Login email address: ')


def password_prompt():
    """Ask for the per-assignment one-time password (not the account password)."""
    return input("One-time password from the assignment page (NOT your own account's password): ")


def parts_prompt():
    """List the submittable parts and ask which ones to submit."""
    print('These are the assignment parts that you can submit:')
    for i, name in enumerate(part_friendly_names):
        print(' %d) %s' % (i+1, name))
    return input('\nWhich parts do you want to submit? (Ex: 1, 4-7): ')
def parse_parts(string):
    """Expand a spec like '1,4-7' into the flat list of matching submission groups."""
    def extract_range(token):
        bounds = token.split('-')
        if len(bounds) == 1:
            return [int(bounds[0])]
        return list(range(int(bounds[0]), 1 + int(bounds[1])))

    flat_parts = []
    for token in string.split(','):
        flat_parts.extend(extract_range(token))
    selected = []
    for p in flat_parts:
        selected.extend(groups[p - 1])
    return selected
def get_challenge(email, sid):
    """Gets the challenge salt from the server. Returns (email,ch,state,ch_aux)."""
    params = {'email_address': email, 'assignment_part_sid': sid, 'response_encoding': 'delim'}
    challenge_url = '%s%schallenge' % (protocol, base_url)
    data = urllib.parse.urlencode(params).encode('utf-8')
    req = urllib.request.Request(challenge_url, data)
    resp = urllib.request.urlopen(req)
    # The response is one '|'-delimited record; fields 2/4/6/8 carry the values.
    # NOTE(review): readall() is not part of the documented HTTPResponse API
    # on all Python 3 versions; read() is the portable spelling -- confirm.
    text = resp.readall().decode('utf-8').strip().split('|')
    if len(text) != 9:
        print(' !! %s' % '|'.join(text))
        sys.exit(1)
    return tuple(text[x] for x in [2,4,6,8])
def challenge_response(email, passwd, challenge):
    """Return the SHA-1 hex digest of challenge + password that the server expects."""
    digest = hashlib.sha1()
    digest.update((challenge + passwd).encode('utf-8'))
    return digest.hexdigest()
def submit_solution(email_address, ch_resp, sid, output, source, state, ch_aux):
    """POST one part's output and source (base64) and return the grader feedback."""
    # Coursera expects base64 text; encodebytes wants bytes, hence the helper.
    b64ize = lambda s: str(base64.encodebytes(s.encode('utf-8')), 'ascii')
    values = { 'assignment_part_sid' : sid
             , 'email_address' : email_address
             , 'submission' : b64ize(output)
             , 'submission_aux' : b64ize(source)
             , 'challenge_response' : ch_resp
             , 'state' : state
             }
    submit_url = '%s%ssubmit' % (protocol, base_url)
    data = urllib.parse.urlencode(values).encode('utf-8')
    req = urllib.request.Request(submit_url, data)
    response = urllib.request.urlopen(req)
    # NOTE(review): readall() may not exist on every Python 3 HTTPResponse;
    # read() is the portable call -- confirm.
    return response.readall().decode('utf-8').strip()
def source(sid):
    """Concatenate the submission's source files, tagged with the submit version."""
    src = ['# submit version: %s' % SUBMIT_VERSION]
    # set() de-duplicates: source_files repeats the same filename per part.
    for fn in set(source_files):
        with open(fn) as source_f:
            src.append(source_f.read())
    return '\n\n'.join(src)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    env = os.environ
    # Environment variables provide non-interactive defaults for scripting.
    helps = [ 'numbers or ranges of tasks to submit'
            , 'the email address on your Coursera account'
            , 'your ONE-TIME password'
            , 'use ANSI color escape sequences'
            , 'show the test\'s interaction with your code'
            , 'use an encrypted connection to Coursera'
            , 'use an unencrypted connection to Coursera'
            ]
    parser.add_argument('tasks', default=env.get('COURSERA_TASKS'), nargs='*', help=helps[0])
    parser.add_argument('--email', default=env.get('COURSERA_EMAIL'), help=helps[1])
    parser.add_argument('--password', default=env.get('COURSERA_PASS'), help=helps[2])
    parser.add_argument('--colorize', default=False, action='store_true', help=helps[3])
    parser.add_argument('--verbose', default=False, action='store_true', help=helps[4])
    # --https/--http are mutually exclusive; their help texts are helps[-2]/[-1].
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--https', dest="protocol", const="https", action="store_const", help=helps[-2])
    group.add_argument('--http', dest="protocol", const="http", action="store_const", help=helps[-1])
    args = parser.parse_args()
    if args.protocol: protocol = args.protocol
    colorize = args.colorize
    verbose = args.verbose
    submit(','.join(args.tasks), args.email, args.password)
| mit |
opencobra/memote | src/memote/jinja2_extension.py | 2 | 1718 | # -*- coding: utf-8 -*-
# Copyright 2018 Novo Nordisk Foundation Center for Biosustainability,
# Technical University of Denmark.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provide a jinja2 extension for the cookiecutter.."""
from __future__ import absolute_import
import os
from os.path import basename, expanduser, isabs, join
from jinja2.ext import Extension
__all__ = ("MemoteExtension",)
class MemoteExtension(Extension):
    """Provide an absolute path to a file."""

    # Tag names this extension claims in jinja2 templates.
    tags = frozenset(["basename", "dirname", "abspath"])

    def __init__(self, environment):
        """Initialize the extension and prepare the jinja2 environment."""
        super(MemoteExtension, self).__init__(environment)
        # Register filters usable as {{ path|normalize }} / {{ path|basename }}.
        environment.filters["normalize"] = self.normalize
        environment.filters["basename"] = basename

    @staticmethod
    def normalize(filename):
        """Return an absolute path of the given file name.

        The sentinel "default" passes through unchanged; '~' is expanded;
        relative paths are anchored at the current working directory.
        """
        # Default value means we do not resolve a model file.
        if filename == "default":
            return filename
        filename = expanduser(filename)
        if isabs(filename):
            return filename
        else:
            return join(os.getcwd(), filename)
| apache-2.0 |
arbrandes/edx-platform | common/lib/xmodule/xmodule/tests/test_xblock_wrappers.py | 4 | 16278 | """
Tests for the wrapping layer that provides the XBlock API using XModule/Descriptor
functionality
"""
# For tests, ignore access to protected members
# pylint: disable=protected-access
from unittest.case import SkipTest, TestCase
from unittest.mock import Mock
import ddt
import webob
from webob.multidict import MultiDict
from factory import (
BUILD_STRATEGY,
Factory,
LazyAttributeSequence,
SubFactory,
lazy_attribute,
post_generation,
use_strategy
)
from fs.memoryfs import MemoryFS
from lxml import etree
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from xblock.core import XBlock
from xblock.field_data import DictFieldData
from xblock.fields import ScopeIds
from xmodule.annotatable_module import AnnotatableBlock
from xmodule.conditional_module import ConditionalBlock
from xmodule.course_module import CourseBlock
from xmodule.html_module import HtmlBlock
from xmodule.poll_module import PollBlock
from xmodule.randomize_module import RandomizeBlock
from xmodule.seq_module import SequenceBlock
from xmodule.tests import get_test_descriptor_system, get_test_system
from xmodule.vertical_block import VerticalBlock
from xmodule.word_cloud_module import WordCloudBlock
from xmodule.wrapper_module import WrapperBlock
from xmodule.x_module import (
PUBLIC_VIEW,
STUDENT_VIEW,
STUDIO_VIEW,
DescriptorSystem,
ModuleSystem,
XModule,
XModuleDescriptor
)
# A dictionary that maps specific XModuleDescriptor classes without children
# to a list of sample field values to test with.
# TODO: Add more types of sample data
LEAF_XMODULES = {
AnnotatableBlock: [{}],
HtmlBlock: [{}],
PollBlock: [{'display_name': 'Poll Display Name'}],
WordCloudBlock: [{}],
}
# A dictionary that maps specific XModuleDescriptor classes with children
# to a list of sample field values to test with.
# TODO: Add more types of sample data
CONTAINER_XMODULES = {
ConditionalBlock: [{}],
CourseBlock: [{}],
RandomizeBlock: [{'display_name': 'Test String Display'}],
SequenceBlock: [{'display_name': 'Test Unicode हिंदी Display'}],
VerticalBlock: [{}],
WrapperBlock: [{}],
}
# These modules are not editable in studio yet
NOT_STUDIO_EDITABLE = (
PollBlock,
)
def flatten(class_dict):
    """
    Flatten a dict from cls -> [fields, ...], yielding (cls, fields) pairs.

    Classes are visited in alphabetical order of their names so the
    resulting sequence is deterministic.
    """
    for cls in sorted(class_dict, key=lambda klass: klass.__name__):
        for fields in class_dict[cls]:
            yield cls, fields
@use_strategy(BUILD_STRATEGY)
class ModuleSystemFactory(Factory):
    """
    Factory to build a test ModuleSystem. Creation is
    performed by :func:`xmodule.tests.get_test_system`, so
    arguments for that function are valid factory attributes.
    """
    class Meta:
        model = ModuleSystem

    @classmethod
    def _build(cls, target_class, *args, **kwargs):  # lint-amnesty, pylint: disable=arguments-differ, unused-argument
        """See documentation from :meth:`factory.Factory._build`"""
        # Delegate construction entirely to the shared test helper.
        return get_test_system(*args, **kwargs)


@use_strategy(BUILD_STRATEGY)
class DescriptorSystemFactory(Factory):
    """
    Factory to build a test DescriptorSystem. Creation is
    performed by :func:`xmodule.tests.get_test_descriptor_system`, so
    arguments for that function are valid factory attributes.
    """
    class Meta:
        model = DescriptorSystem

    @classmethod
    def _build(cls, target_class, *args, **kwargs):  # lint-amnesty, pylint: disable=arguments-differ, unused-argument
        """See documentation from :meth:`factory.Factory._build`"""
        return get_test_descriptor_system(*args, **kwargs)
class ContainerModuleRuntimeFactory(ModuleSystemFactory):
    """
    Factory to generate a ModuleRuntime that generates children when asked
    for them, for testing container XModules.
    """

    @post_generation
    def depth(self, create, depth, **kwargs):  # pylint: disable=unused-argument
        """
        When `depth` is specified as a Factory parameter, creates a
        tree of children with that many levels.
        """
        # pylint: disable=no-member
        # At depth 0 children are leaves (Html); otherwise each child is a
        # Vertical container whose own runtime recurses with depth - 1.
        if depth == 0:
            self.get_module.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlBlock)
        else:
            self.get_module.side_effect = lambda x: ContainerModuleFactory(
                descriptor_cls=VerticalBlock,
                depth=depth - 1
            )

    @post_generation
    def position(self, create, position=2, **kwargs):  # pylint: disable=unused-argument, method-hidden
        """
        Update the position attribute of the generated ModuleRuntime.
        """
        self.position = position


class ContainerDescriptorRuntimeFactory(DescriptorSystemFactory):
    """
    Factory to generate a DescriptorRuntime that generates children when asked
    for them, for testing container XModuleDescriptors.
    """

    @post_generation
    def depth(self, create, depth, **kwargs):  # pylint: disable=unused-argument
        """
        When `depth` is specified as a Factory parameter, creates a
        tree of children with that many levels.
        """
        # pylint: disable=no-member
        # Same recursion as above, but hooked into the descriptor system's
        # load_item instead of the module system's get_module.
        if depth == 0:
            self.load_item.side_effect = lambda x: LeafModuleFactory(descriptor_cls=HtmlBlock)
        else:
            self.load_item.side_effect = lambda x: ContainerModuleFactory(
                descriptor_cls=VerticalBlock,
                depth=depth - 1
            )

    @post_generation
    def position(self, create, position=2, **kwargs):  # pylint: disable=unused-argument, method-hidden
        """
        Update the position attribute of the generated ModuleRuntime.
        """
        self.position = position
@use_strategy(BUILD_STRATEGY)
class LeafDescriptorFactory(Factory):
    """
    Factory to generate leaf XModuleDescriptors.
    """
    class Meta:
        model = XModuleDescriptor

    runtime = SubFactory(DescriptorSystemFactory)
    # e.g. 'HtmlBlock_0', 'HtmlBlock_1', ... per generated instance.
    url_name = LazyAttributeSequence('{.block_type}_{}'.format)

    @lazy_attribute
    def location(self):
        return BlockUsageLocator(CourseLocator('org', 'course', 'run'), 'category', self.url_name)

    @lazy_attribute
    def block_type(self):
        return self.descriptor_cls.__name__  # pylint: disable=no-member

    @lazy_attribute
    def definition_id(self):
        return self.location

    @lazy_attribute
    def usage_id(self):
        return self.location

    @classmethod
    def _build(cls, target_class, *args, **kwargs):  # lint-amnesty, pylint: disable=arguments-differ, unused-argument
        # Pop the factory-managed attributes; everything left in kwargs
        # becomes field data for the constructed block.
        runtime = kwargs.pop('runtime')
        desc_cls = kwargs.pop('descriptor_cls')
        block_type = kwargs.pop('block_type')
        def_id = kwargs.pop('definition_id')
        usage_id = kwargs.pop('usage_id')
        block = runtime.construct_xblock_from_class(
            desc_cls,
            ScopeIds(None, block_type, def_id, usage_id),
            DictFieldData(dict(**kwargs))
        )
        block.save()
        return block


class LeafModuleFactory(LeafDescriptorFactory):
    """
    Factory to generate leaf XModuleDescriptors that are prepped to be
    used as XModules.
    """

    @post_generation
    def xmodule_runtime(self, create, xmodule_runtime, **kwargs):  # pylint: disable=method-hidden, unused-argument
        """
        Set the xmodule_runtime to make this XModuleDescriptor usable
        as an XModule.
        """
        if xmodule_runtime is None:
            xmodule_runtime = ModuleSystemFactory()
        self.xmodule_runtime = xmodule_runtime
class ContainerDescriptorFactory(LeafDescriptorFactory):
    """
    Factory to generate XModuleDescriptors that are containers.
    """
    runtime = SubFactory(ContainerDescriptorRuntimeFactory)
    # Three child ids by default.
    children = list(range(3))


class ContainerModuleFactory(LeafModuleFactory):
    """
    Factory to generate XModuleDescriptors that are containers
    and are ready to act as XModules.
    """

    @lazy_attribute
    def xmodule_runtime(self):  # lint-amnesty, pylint: disable=arguments-differ
        # The runtime must know the tree depth so it can build children lazily.
        return ContainerModuleRuntimeFactory(depth=self.depth)  # pylint: disable=no-member
@ddt.ddt
class XBlockWrapperTestMixin:
    """
    This is a mixin for building tests of the implementation of the XBlock
    api by wrapping XModule native functions.

    You can create an actual test case by inheriting from this class and UnitTest,
    and implement skip_if_invalid and check_property.
    """

    def skip_if_invalid(self, descriptor_cls):
        """
        Raise SkipTest if this descriptor_cls shouldn't be tested.
        """
        pass  # lint-amnesty, pylint: disable=unnecessary-pass

    def check_property(self, descriptor):
        """
        Execute assertions to verify that the property under test is true for
        the supplied descriptor.
        """
        raise SkipTest("check_property not defined")

    # Test that for all of the leaf XModule Descriptors,
    # the test property holds
    @ddt.data(*flatten(LEAF_XMODULES))
    def test_leaf_node(self, cls_and_fields):
        descriptor_cls, fields = cls_and_fields
        self.skip_if_invalid(descriptor_cls)
        descriptor = LeafModuleFactory(descriptor_cls=descriptor_cls, **fields)
        mocked_course = Mock()
        modulestore = Mock()
        modulestore.get_course.return_value = mocked_course
        # pylint: disable=no-member
        descriptor.runtime.id_reader.get_definition_id = Mock(return_value='a')
        descriptor.runtime.modulestore = modulestore
        if hasattr(descriptor, '_xmodule'):
            descriptor._xmodule.graded = 'False'
        self.check_property(descriptor)

    # Test that when an xmodule is generated from descriptor_cls
    # with only xmodule children, the test property holds
    @ddt.data(*flatten(CONTAINER_XMODULES))
    def test_container_node_xmodules_only(self, cls_and_fields):
        descriptor_cls, fields = cls_and_fields
        self.skip_if_invalid(descriptor_cls)
        descriptor = ContainerModuleFactory(descriptor_cls=descriptor_cls, depth=2, **fields)
        descriptor.runtime.id_reader.get_definition_id = Mock(return_value='a')
        self.check_property(descriptor)

    # Test that when an xmodule is generated from descriptor_cls
    # with mixed xmodule and xblock children, the test property holds
    @ddt.data(*flatten(CONTAINER_XMODULES))
    def test_container_node_mixed(self, cls_and_fields):
        raise SkipTest("XBlock support in XDescriptor not yet fully implemented")

    # Test that when an xmodule is generated from descriptor_cls
    # with only xblock children, the test property holds
    @ddt.data(*flatten(CONTAINER_XMODULES))
    def test_container_node_xblocks_only(self, cls_and_fields):
        raise SkipTest("XBlock support in XModules not yet fully implemented")
class TestStudentView(XBlockWrapperTestMixin, TestCase):
    """
    Verify that student_view and XModule.get_html produce the same results.
    """

    def skip_if_invalid(self, descriptor_cls):
        # Pure XBlocks carry student_view on the class itself; XModule
        # descriptors carry it on their module_class.
        is_pure_xblock = issubclass(descriptor_cls, XBlock) and not issubclass(descriptor_cls, XModuleDescriptor)
        view = descriptor_cls.student_view if is_pure_xblock else descriptor_cls.module_class.student_view
        if view != XModule.student_view:
            raise SkipTest(descriptor_cls.__name__ + " implements student_view")

    def check_property(self, descriptor):
        """
        Assert that both student_view and get_html render the same.
        """
        rendered = descriptor.render(STUDENT_VIEW).content
        assert descriptor._xmodule.get_html() == rendered
class TestStudioView(XBlockWrapperTestMixin, TestCase):
    """
    Verify that studio_view and XModuleDescriptor.get_html produce the same results.
    """

    def skip_if_invalid(self, descriptor_cls):
        # Guard clauses: each disqualifying condition raises SkipTest.
        if descriptor_cls in NOT_STUDIO_EDITABLE:
            raise SkipTest(descriptor_cls.__name__ + " is not editable in studio")
        is_pure_xblock = issubclass(descriptor_cls, XBlock) and not issubclass(descriptor_cls, XModuleDescriptor)
        if is_pure_xblock:
            raise SkipTest(descriptor_cls.__name__ + " is a pure XBlock and implements studio_view")
        if descriptor_cls.studio_view != XModuleDescriptor.studio_view:
            raise SkipTest(descriptor_cls.__name__ + " implements studio_view")

    def check_property(self, descriptor):
        """
        Assert that studio_view and get_html render the same.
        """
        rendered = descriptor.render(STUDIO_VIEW).content
        assert descriptor.get_html() == rendered
@ddt.ddt
class TestXModuleHandler(TestCase):
    """
    Tests that the xmodule_handler function correctly wraps handle_ajax
    """

    def setUp(self):
        super().setUp()
        # A bare XModule with all collaborators mocked; handle_ajax is
        # stubbed to return an empty JSON object string.
        self.module = XModule(descriptor=Mock(), field_data=Mock(), runtime=Mock(), scope_ids=Mock())
        self.module.handle_ajax = Mock(return_value='{}')
        self.request = webob.Request({})

    def test_xmodule_handler_passed_data(self):
        # With no dispatch segment, handle_ajax receives None as the dispatch.
        self.module.xmodule_handler(self.request)
        self.module.handle_ajax.assert_called_with(None, MultiDict(self.request.POST))

    def test_xmodule_handler_dispatch(self):
        self.module.xmodule_handler(self.request, 'dispatch')
        self.module.handle_ajax.assert_called_with('dispatch', MultiDict(self.request.POST))

    def test_xmodule_handler_return_value(self):
        # The raw JSON string from handle_ajax is wrapped in a webob.Response.
        response = self.module.xmodule_handler(self.request)
        assert isinstance(response, webob.Response)
        assert response.body.decode('utf-8') == '{}'

    @ddt.data(
        '{"test_key": "test_value"}',
        '{"test_key": "test_value"}',
    )
    def test_xmodule_handler_with_data(self, response_data):
        """
        Tests that xmodule_handler function correctly wraps handle_ajax when handle_ajax response is either
        str or unicode.
        """
        # NOTE(review): the two ddt data values are identical today; they were
        # presumably distinct (str vs unicode) before the Python 3 migration.
        self.module.handle_ajax = Mock(return_value=response_data)
        response = self.module.xmodule_handler(self.request)
        assert isinstance(response, webob.Response)
        assert response.body.decode('utf-8') == '{"test_key": "test_value"}'
class TestXmlExport(XBlockWrapperTestMixin, TestCase):
    """
    Verify that XModuleDescriptor.export_to_xml and add_xml_to_node agree.
    """

    def skip_if_invalid(self, descriptor_cls):
        if descriptor_cls.add_xml_to_node != XModuleDescriptor.add_xml_to_node:
            raise SkipTest(descriptor_cls.__name__ + " implements add_xml_to_node")

    def check_property(self, descriptor):
        # Export through both APIs into separate in-memory filesystems and
        # compare the resulting trees plus the serialized XML nodes.
        legacy_fs = MemoryFS()
        xblock_fs = MemoryFS()
        descriptor.runtime.export_fs = xblock_fs
        node_from_xblock_api = etree.Element('unknown')
        descriptor.add_xml_to_node(node_from_xblock_api)
        node_from_legacy_api = etree.fromstring(descriptor.export_to_xml(legacy_fs))
        assert list(legacy_fs.walk()) == list(xblock_fs.walk())
        assert etree.tostring(node_from_legacy_api) == etree.tostring(node_from_xblock_api)
class TestPublicView(XBlockWrapperTestMixin, TestCase):
    """
    Verify that the default public_view shows the correct message.
    """

    def skip_if_invalid(self, descriptor_cls):
        # Pure XBlocks carry public_view on the class; XModule descriptors
        # carry it on their module_class.
        is_pure_xblock = issubclass(descriptor_cls, XBlock) and not issubclass(descriptor_cls, XModuleDescriptor)
        view = descriptor_cls.public_view if is_pure_xblock else descriptor_cls.module_class.public_view
        if view != XModule.public_view:
            raise SkipTest(descriptor_cls.__name__ + " implements public_view")

    def check_property(self, descriptor):
        """
        Assert that public_view contains the correct message.
        """
        content = descriptor.render(PUBLIC_VIEW).content
        if descriptor.display_name:
            assert descriptor.display_name in content
        else:
            assert 'This content is only accessible' in content
| agpl-3.0 |
c0hen/django-venv | lib/python3.4/site-packages/django/http/cookie.py | 119 | 2895 | from __future__ import unicode_literals
import sys
from django.utils import six
from django.utils.encoding import force_str
from django.utils.six.moves import http_cookies
# http://bugs.python.org/issue2193 is fixed in Python 3.3+.
_cookie_allows_colon_in_names = six.PY3

# Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+
# http://bugs.python.org/issue22775
cookie_pickles_properly = (
    (sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or
    sys.version_info >= (3, 4, 3)
)

if _cookie_allows_colon_in_names and cookie_pickles_properly:
    # Modern interpreters need no workarounds: use the stdlib class as-is.
    SimpleCookie = http_cookies.SimpleCookie
else:
    Morsel = http_cookies.Morsel

    class SimpleCookie(http_cookies.SimpleCookie):
        # Subclass that patches the two stdlib bugs above; each patch is
        # compiled into the class only on interpreters that still need it.
        if not cookie_pickles_properly:
            def __setitem__(self, key, value):
                # Apply the fix from http://bugs.python.org/issue22775 where
                # it's not fixed in Python itself
                if isinstance(value, Morsel):
                    # allow assignment of constructed Morsels (e.g. for pickling)
                    dict.__setitem__(self, key, value)
                else:
                    super(SimpleCookie, self).__setitem__(key, value)

        if not _cookie_allows_colon_in_names:
            def load(self, rawdata):
                # Collect names the base parser rejects so they can be
                # dropped afterwards instead of raising.
                self.bad_cookies = set()
                if isinstance(rawdata, six.text_type):
                    rawdata = force_str(rawdata)
                super(SimpleCookie, self).load(rawdata)
                for key in self.bad_cookies:
                    del self[key]

            # override private __set() method:
            # (needed for using our Morsel, and for laxness with CookieError
            def _BaseCookie__set(self, key, real_value, coded_value):
                # Name-mangled override of BaseCookie.__set: record bad keys
                # in self.bad_cookies rather than propagating CookieError.
                key = force_str(key)
                try:
                    M = self.get(key, Morsel())
                    M.set(key, real_value, coded_value)
                    dict.__setitem__(self, key, M)
                except http_cookies.CookieError:
                    if not hasattr(self, 'bad_cookies'):
                        self.bad_cookies = set()
                    self.bad_cookies.add(key)
                    dict.__setitem__(self, key, http_cookies.Morsel())
def parse_cookie(cookie):
    """
    Return a dictionary parsed from a `Cookie:` header string.
    """
    parsed = {}
    if six.PY2:
        cookie = force_str(cookie)
    for chunk in cookie.split(str(';')):
        key, sep, val = chunk.partition(str('='))
        if not sep:
            # Assume an empty name per
            # https://bugzilla.mozilla.org/show_bug.cgi?id=169091
            key, val = str(''), chunk
        key, val = key.strip(), val.strip()
        if key or val:
            # unquote using Python's algorithm.
            parsed[key] = http_cookies._unquote(val)
    return parsed
| gpl-3.0 |
sestrella/ansible | lib/ansible/modules/cloud/google/gcp_compute_image.py | 3 | 28985 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Google
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# ----------------------------------------------------------------------------
#
# *** AUTO GENERATED CODE *** AUTO GENERATED CODE ***
#
# ----------------------------------------------------------------------------
#
# This file is automatically generated by Magic Modules and manual
# changes will be clobbered when the file is regenerated.
#
# Please read more about how to change this file at
# https://www.github.com/GoogleCloudPlatform/magic-modules
#
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
__metaclass__ = type
################################################################################
# Documentation
################################################################################
ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ["preview"], 'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gcp_compute_image
description:
- Represents an Image resource.
- Google Compute Engine uses operating system images to create the root persistent
disks for your instances. You specify an image when you create an instance. Images
contain a boot loader, an operating system, and a root file system. Linux operating
system images are also capable of running containers on Compute Engine.
- Images can be either public or custom.
- Public images are provided and maintained by Google, open-source communities, and
third-party vendors. By default, all projects have access to these images and can
use them to create instances. Custom images are available only to your project.
You can create a custom image from root persistent disks and other images. Then,
use the custom image to create an instance.
short_description: Creates a GCP Image
version_added: '2.6'
author: Google Inc. (@googlecloudplatform)
requirements:
- python >= 2.6
- requests >= 2.18.4
- google-auth >= 1.3.0
options:
state:
description:
- Whether the given object should exist in GCP
choices:
- present
- absent
default: present
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
required: false
type: str
disk_size_gb:
description:
- Size of the image when restored onto a persistent disk (in GB).
required: false
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
required: false
type: str
guest_os_features:
description:
- A list of features to enable on the guest operating system.
- Applicable only for bootable images.
required: false
type: list
suboptions:
type:
description:
- The type of supported feature.
- 'Some valid choices include: "MULTI_IP_SUBNET", "SECURE_BOOT", "UEFI_COMPATIBLE",
"VIRTIO_SCSI_MULTIQUEUE", "WINDOWS"'
required: false
type: str
image_encryption_key:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the
same key if you use the image later (e.g. to create a disk from the image) .
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
labels:
description:
- Labels to apply to this Image.
required: false
type: dict
version_added: '2.8'
licenses:
description:
- Any applicable license URI.
required: false
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
required: true
type: str
raw_disk:
description:
- The parameters of the raw disk image.
required: false
type: dict
suboptions:
container_type:
description:
- The format used to encode and transmit the block device, which should be
TAR. This is just a container and transmission format and not a runtime
format. Provided by the client when the disk image is created.
- 'Some valid choices include: "TAR"'
required: false
type: str
sha1_checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
required: false
type: str
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must
provide either this property or the sourceDisk property but not both.
required: true
type: str
source_disk:
description:
- The source disk to create this image based on.
- You must provide either this property or the rawDisk.source property but not
both to create an image.
- 'This field represents a link to a Disk resource in GCP. It can be specified
in two ways. First, you can place a dictionary with key ''selfLink'' and value
of your resource''s selfLink Alternatively, you can add `register: name-of-resource`
to a gcp_compute_disk task and then set this source_disk field to "{{ name-of-resource
}}"'
required: false
type: dict
source_disk_encryption_key:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
required: false
type: dict
suboptions:
raw_key:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
required: false
type: str
source_disk_id:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
required: false
type: str
source_type:
description:
- The type of the image used to create this disk. The default and only value is
RAW .
- 'Some valid choices include: "RAW"'
required: false
type: str
project:
description:
- The Google Cloud Platform project to use.
type: str
auth_kind:
description:
- The type of credential used.
type: str
required: true
choices:
- application
- machineaccount
- serviceaccount
service_account_contents:
description:
- The contents of a Service Account JSON file, either in a dictionary or as a
JSON string that represents it.
type: jsonarg
service_account_file:
description:
- The path of a Service Account JSON file if serviceaccount is selected as type.
type: path
service_account_email:
description:
- An optional service account email address if machineaccount is selected and
the user does not wish to use the default email.
type: str
scopes:
description:
- Array of scopes to be used
type: list
env_type:
description:
- Specifies which Ansible environment you're running this module within.
- This should not be set unless you know what you're doing.
- This only alters the User Agent string for any API requests.
type: str
notes:
- 'API Reference: U(https://cloud.google.com/compute/docs/reference/v1/images)'
- 'Official Documentation: U(https://cloud.google.com/compute/docs/images)'
- for authentication, you can set service_account_file using the C(gcp_service_account_file)
env variable.
- for authentication, you can set service_account_contents using the C(GCP_SERVICE_ACCOUNT_CONTENTS)
env variable.
- For authentication, you can set service_account_email using the C(GCP_SERVICE_ACCOUNT_EMAIL)
env variable.
- For authentication, you can set auth_kind using the C(GCP_AUTH_KIND) env variable.
- For authentication, you can set scopes using the C(GCP_SCOPES) env variable.
- Environment variables values will only be used if the playbook values are not set.
- The I(service_account_email) and I(service_account_file) options are mutually exclusive.
'''
EXAMPLES = '''
- name: create a disk
gcp_compute_disk:
name: disk-image
zone: us-central1-a
project: "{{ gcp_project }}"
auth_kind: "{{ gcp_cred_kind }}"
service_account_file: "{{ gcp_cred_file }}"
state: present
register: disk
- name: create a image
gcp_compute_image:
name: test_object
source_disk: "{{ disk }}"
project: test_project
auth_kind: serviceaccount
service_account_file: "/tmp/auth.pem"
state: present
'''
RETURN = '''
archiveSizeBytes:
description:
- Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).
returned: success
type: int
creationTimestamp:
description:
- Creation timestamp in RFC3339 text format.
returned: success
type: str
deprecated:
description:
- The deprecation status associated with this image.
returned: success
type: complex
contains:
deleted:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DELETED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
deprecated:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to DEPRECATED. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
obsolete:
description:
- An optional RFC3339 timestamp on or after which the state of this resource
is intended to change to OBSOLETE. This is only informational and the status
will not change unless the client explicitly changes it.
returned: success
type: str
replacement:
description:
- The URL of the suggested replacement for a deprecated resource.
- The suggested replacement resource must be the same kind of resource as the
deprecated resource.
returned: success
type: str
state:
description:
- The deprecation state of this resource. This can be DEPRECATED, OBSOLETE,
or DELETED. Operations which create a new resource using a DEPRECATED resource
will return successfully, but with a warning indicating the deprecated resource
and recommending its replacement. Operations which use OBSOLETE or DELETED
resources will be rejected and result in an error.
returned: success
type: str
description:
description:
- An optional description of this resource. Provide this property when you create
the resource.
returned: success
type: str
diskSizeGb:
description:
- Size of the image when restored onto a persistent disk (in GB).
returned: success
type: int
family:
description:
- The name of the image family to which this image belongs. You can create disks
by specifying an image family instead of a specific image name. The image family
always returns its latest image that is not deprecated. The name of the image
family must comply with RFC1035.
returned: success
type: str
guestOsFeatures:
description:
- A list of features to enable on the guest operating system.
- Applicable only for bootable images.
returned: success
type: complex
contains:
type:
description:
- The type of supported feature.
returned: success
type: str
id:
description:
- The unique identifier for the resource. This identifier is defined by the server.
returned: success
type: int
imageEncryptionKey:
description:
- Encrypts the image using a customer-supplied encryption key.
- After you encrypt an image with a customer-supplied key, you must provide the
same key if you use the image later (e.g. to create a disk from the image) .
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
labels:
description:
- Labels to apply to this Image.
returned: success
type: dict
labelFingerprint:
description:
- The fingerprint used for optimistic locking of this resource. Used internally
during updates.
returned: success
type: str
licenses:
description:
- Any applicable license URI.
returned: success
type: list
name:
description:
- Name of the resource; provided by the client when the resource is created. The
name must be 1-63 characters long, and comply with RFC1035. Specifically, the
name must be 1-63 characters long and match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?`
which means the first character must be a lowercase letter, and all following
characters must be a dash, lowercase letter, or digit, except the last character,
which cannot be a dash.
returned: success
type: str
rawDisk:
description:
- The parameters of the raw disk image.
returned: success
type: complex
contains:
containerType:
description:
- The format used to encode and transmit the block device, which should be TAR.
This is just a container and transmission format and not a runtime format.
Provided by the client when the disk image is created.
returned: success
type: str
sha1Checksum:
description:
- An optional SHA1 checksum of the disk image before unpackaging.
- This is provided by the client when the disk image is created.
returned: success
type: str
source:
description:
- The full Google Cloud Storage URL where disk storage is stored You must provide
either this property or the sourceDisk property but not both.
returned: success
type: str
sourceDisk:
description:
- The source disk to create this image based on.
- You must provide either this property or the rawDisk.source property but not both
to create an image.
returned: success
type: dict
sourceDiskEncryptionKey:
description:
- The customer-supplied encryption key of the source disk. Required if the source
disk is protected by a customer-supplied encryption key.
returned: success
type: complex
contains:
rawKey:
description:
- Specifies a 256-bit customer-supplied encryption key, encoded in RFC 4648
base64 to either encrypt or decrypt this resource.
returned: success
type: str
sha256:
description:
- The RFC 4648 base64 encoded SHA-256 hash of the customer-supplied encryption
key that protects this resource.
returned: success
type: str
sourceDiskId:
description:
- The ID value of the disk used to create this image. This value may be used to
determine whether the image was taken from the current or a previous instance
of a given disk name.
returned: success
type: str
sourceType:
description:
- The type of the image used to create this disk. The default and only value is
RAW .
returned: success
type: str
'''
################################################################################
# Imports
################################################################################
from ansible.module_utils.gcp_utils import navigate_hash, GcpSession, GcpModule, GcpRequest, remove_nones_from_dict, replace_resource_dict
import json
import re
import time
################################################################################
# Main
################################################################################
def main():
    """Main function"""
    # Argument spec mirrors the resource schema described in DOCUMENTATION.
    module = GcpModule(
        argument_spec=dict(
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            description=dict(type='str'),
            disk_size_gb=dict(type='int'),
            family=dict(type='str'),
            guest_os_features=dict(type='list', elements='dict', options=dict(type=dict(type='str'))),
            image_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
            labels=dict(type='dict'),
            licenses=dict(type='list', elements='str'),
            name=dict(required=True, type='str'),
            raw_disk=dict(type='dict', options=dict(container_type=dict(type='str'), sha1_checksum=dict(type='str'), source=dict(required=True, type='str'))),
            source_disk=dict(type='dict'),
            source_disk_encryption_key=dict(type='dict', options=dict(raw_key=dict(type='str'))),
            source_disk_id=dict(type='str'),
            source_type=dict(type='str'),
        )
    )

    # Default to the Compute scope when the caller supplied none.
    if not module.params['scopes']:
        module.params['scopes'] = ['https://www.googleapis.com/auth/compute']

    state = module.params['state']
    kind = 'compute#image'

    # Reconcile desired state against the resource currently in GCP:
    # exists+present -> update if different; exists+absent -> delete;
    # missing+present -> create; missing+absent -> no-op.
    fetch = fetch_resource(module, self_link(module), kind)
    changed = False

    if fetch:
        if state == 'present':
            if is_different(module, fetch):
                update(module, self_link(module), kind, fetch)
                fetch = fetch_resource(module, self_link(module), kind)
                changed = True
        else:
            delete(module, self_link(module), kind)
            fetch = {}
            changed = True
    else:
        if state == 'present':
            fetch = create(module, collection(module), kind)
            changed = True
        else:
            fetch = {}

    fetch.update({'changed': changed})
    module.exit_json(**fetch)
def create(module, link, kind):
    """POST the image resource and block until the create operation finishes."""
    session = GcpSession(module, 'compute')
    response = session.post(link, resource_to_request(module))
    return wait_for_operation(module, response)
def update(module, link, kind, fetch):
    """Push updatable field changes, then return the refreshed resource."""
    desired = resource_to_request(module)
    current = response_to_hash(module, fetch)
    update_fields(module, desired, current)
    return fetch_resource(module, self_link(module), kind)
def update_fields(module, request, response):
    """Dispatch per-field update calls; labels are the only in-place update."""
    if request.get('labels') != response.get('labels'):
        labels_update(module, request, response)
def labels_update(module, request, response):
    """Call the images.setLabels API with the module's desired labels."""
    session = GcpSession(module, 'compute')
    url = "https://www.googleapis.com/compute/v1/projects/{project}/global/images/{name}/setLabels".format(**module.params)
    body = {
        u'labels': module.params.get('labels'),
        # The current fingerprint is required for optimistic locking.
        u'labelFingerprint': response.get('labelFingerprint'),
    }
    session.post(url, body)
def delete(module, link, kind):
    """DELETE the image and block until the delete operation finishes."""
    session = GcpSession(module, 'compute')
    return wait_for_operation(module, session.delete(link))
def resource_to_request(module):
    """Translate Ansible module params into a GCP Images API request body.

    Unset/empty values are dropped, but an explicit False is kept because it
    is meaningful to the API.
    """
    candidate = {
        u'kind': 'compute#image',
        u'description': module.params.get('description'),
        u'diskSizeGb': module.params.get('disk_size_gb'),
        u'family': module.params.get('family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(module.params.get('guest_os_features', []), module).to_request(),
        u'imageEncryptionKey': ImageImageencryptionkey(module.params.get('image_encryption_key', {}), module).to_request(),
        u'labels': module.params.get('labels'),
        u'licenses': module.params.get('licenses'),
        u'name': module.params.get('name'),
        u'rawDisk': ImageRawdisk(module.params.get('raw_disk', {}), module).to_request(),
        u'sourceDisk': replace_resource_dict(module.params.get(u'source_disk', {}), 'selfLink'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(module.params.get('source_disk_encryption_key', {}), module).to_request(),
        u'sourceDiskId': module.params.get('source_disk_id'),
        u'sourceType': module.params.get('source_type'),
    }
    return {k: v for k, v in candidate.items() if v or v is False}
def fetch_resource(module, link, kind, allow_not_found=True):
    """GET *link* and return the decoded resource (None when absent/allowed)."""
    session = GcpSession(module, 'compute')
    response = session.get(link)
    return return_if_object(module, response, kind, allow_not_found)
def self_link(module):
    """Return the canonical selfLink URL of this image resource."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/images/{name}"
    return template.format(**module.params)
def collection(module):
    """Return the URL of the project's image collection (for create/list)."""
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/images"
    return template.format(**module.params)
def return_if_object(module, response, kind, allow_not_found=False):
    """Decode an API response into a dict, failing the module on API errors.

    Returns None for an allowed 404 or an empty 204 response.
    """
    # If not found, return nothing.
    if allow_not_found and response.status_code == 404:
        return None

    # If no content, return nothing.
    if response.status_code == 204:
        return None

    try:
        module.raise_for_status(response)
        result = response.json()
    except getattr(json.decoder, 'JSONDecodeError', ValueError):
        # Body was not valid JSON (e.g. an HTML error page).
        module.fail_json(msg="Invalid JSON response with error: %s" % response.text)

    # Surface API-level errors embedded in an otherwise successful response.
    if navigate_hash(result, ['error', 'errors']):
        module.fail_json(msg=navigate_hash(result, ['error', 'errors']))

    return result
def is_different(module, response):
    """Return True when the live resource differs from the desired params."""
    request = resource_to_request(module)
    response = response_to_hash(module, response)

    # Compare only keys present on both sides: the response carries
    # output-only fields and the request may omit unset options.
    response_vals = {k: v for k, v in response.items() if k in request}
    request_vals = {k: v for k, v in request.items() if k in response}

    return GcpRequest(request_vals) != GcpRequest(response_vals)
# Remove unnecessary properties from the response.
# This is for doing comparisons with Ansible's current parameters.
def response_to_hash(module, response):
    """Normalize an API response into the camelCase keys used for diffing."""
    return {
        u'archiveSizeBytes': response.get(u'archiveSizeBytes'),
        u'creationTimestamp': response.get(u'creationTimestamp'),
        u'deprecated': ImageDeprecated(response.get(u'deprecated', {}), module).from_response(),
        u'description': response.get(u'description'),
        u'diskSizeGb': response.get(u'diskSizeGb'),
        u'family': response.get(u'family'),
        u'guestOsFeatures': ImageGuestosfeaturesArray(response.get(u'guestOsFeatures', []), module).from_response(),
        u'id': response.get(u'id'),
        u'imageEncryptionKey': ImageImageencryptionkey(response.get(u'imageEncryptionKey', {}), module).from_response(),
        u'labels': response.get(u'labels'),
        u'labelFingerprint': response.get(u'labelFingerprint'),
        u'licenses': response.get(u'licenses'),
        u'name': response.get(u'name'),
        u'rawDisk': ImageRawdisk(response.get(u'rawDisk', {}), module).from_response(),
        u'sourceDisk': response.get(u'sourceDisk'),
        u'sourceDiskEncryptionKey': ImageSourcediskencryptionkey(response.get(u'sourceDiskEncryptionKey', {}), module).from_response(),
        u'sourceDiskId': response.get(u'sourceDiskId'),
        u'sourceType': response.get(u'sourceType'),
    }
def license_selflink(name, params):
    """Canonicalize a license name into a full GCP license selfLink.

    Returns None when *name* is None, *name* unchanged when it already looks
    like a license selfLink, otherwise builds the selfLink using the module's
    project parameter.
    """
    if name is None:
        return None
    # Bug fix: the generated pattern/template contained a doubled slash
    # ('compute/v1//projects'), so genuine single-slash selfLinks failed the
    # match and were re-wrapped into malformed URLs. Accept both forms when
    # matching, but always build the canonical single-slash link.
    pattern = r"https://www.googleapis.com/compute/v1//?projects/.*/global/licenses/.*"
    if not re.match(pattern, name):
        name = "https://www.googleapis.com/compute/v1/projects/{project}/global/licenses/{license}".format(
            project=params['project'], license=name
        )
    return name
def async_op_url(module, extra_data=None):
    """Build the polling URL for a global compute operation.

    ``extra_data`` supplies the ``op_id``; module params override any
    overlapping keys.
    """
    template = "https://www.googleapis.com/compute/v1/projects/{project}/global/operations/{op_id}"
    values = dict(extra_data or {})
    values.update(module.params)
    return template.format(**values)
def wait_for_operation(module, response):
    """Block on a compute#operation response, then fetch the resulting image."""
    op_result = return_if_object(module, response, 'compute#operation')
    if op_result is None:
        return {}
    status = navigate_hash(op_result, ['status'])
    wait_done = wait_for_completion(status, op_result, module)
    # targetLink points at the image the operation created or modified.
    return fetch_resource(module, navigate_hash(wait_done, ['targetLink']), 'compute#image')
def wait_for_completion(status, op_result, module):
    """Poll the operation once per second until its status is DONE."""
    op_id = navigate_hash(op_result, ['name'])
    op_uri = async_op_url(module, {'op_id': op_id})
    while status != 'DONE':
        # Surface operation errors as soon as they appear instead of
        # polling forever on a failed operation.
        raise_if_errors(op_result, ['error', 'errors'], module)
        time.sleep(1.0)
        op_result = fetch_resource(module, op_uri, 'compute#operation', False)
        status = navigate_hash(op_result, ['status'])
    return op_result
def raise_if_errors(response, err_path, module):
    """Fail the module if the operation response carries errors at err_path."""
    errors = navigate_hash(response, err_path)
    if errors is None:
        return
    module.fail_json(msg=errors)
class ImageDeprecated(object):
    """(De)serializer for the image's ``deprecated`` sub-object."""

    # The sub-object uses the same key names in both directions.
    _KEYS = (u'deleted', u'deprecated', u'obsolete', u'replacement', u'state')

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        return remove_nones_from_dict({key: self.request.get(key) for key in self._KEYS})

    def from_response(self):
        return remove_nones_from_dict({key: self.request.get(key) for key in self._KEYS})
class ImageGuestosfeaturesArray(object):
    """(De)serializer for the list of guestOsFeatures entries."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or []

    def to_request(self):
        return [self._request_for_item(entry) for entry in self.request]

    def from_response(self):
        return [self._response_from_item(entry) for entry in self.request]

    def _request_for_item(self, item):
        # Module-argument spelling of the key.
        return remove_nones_from_dict({u'type': item.get('type')})

    def _response_from_item(self, item):
        # API-response spelling of the key.
        return remove_nones_from_dict({u'type': item.get(u'type')})
class ImageImageencryptionkey(object):
    """(De)serializer for the imageEncryptionKey sub-resource."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Module argument uses snake_case; API expects camelCase.
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
class ImageRawdisk(object):
    """(De)serializer for the rawDisk sub-resource."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Map snake_case module arguments to the API's camelCase keys.
        payload = {
            u'containerType': self.request.get('container_type'),
            u'sha1Checksum': self.request.get('sha1_checksum'),
            u'source': self.request.get('source'),
        }
        return remove_nones_from_dict(payload)

    def from_response(self):
        payload = {
            u'containerType': self.request.get(u'containerType'),
            u'sha1Checksum': self.request.get(u'sha1Checksum'),
            u'source': self.request.get(u'source'),
        }
        return remove_nones_from_dict(payload)
class ImageSourcediskencryptionkey(object):
    """(De)serializer for the sourceDiskEncryptionKey sub-resource."""

    def __init__(self, request, module):
        self.module = module
        self.request = request or {}

    def to_request(self):
        # Module argument uses snake_case; API expects camelCase.
        return remove_nones_from_dict({u'rawKey': self.request.get('raw_key')})

    def from_response(self):
        return remove_nones_from_dict({u'rawKey': self.request.get(u'rawKey')})
# Entry point: run the Ansible module logic defined above.
if __name__ == '__main__':
    main()
| gpl-3.0 |
borysiasty/QGIS | tests/src/python/test_qgsmaprenderercache.py | 15 | 12023 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsMapRendererCache.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '1/02/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsMapRendererCache,
QgsRectangle,
QgsVectorLayer,
QgsProject)
from qgis.testing import start_app, unittest
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtGui import QImage
from time import sleep
start_app()
class TestQgsMapRendererCache(unittest.TestCase):
    """Exercise QgsMapRendererCache: storing/clearing cached images,
    invalidation on extent or scale change, and automatic clearing when
    dependent layers repaint, are removed, or auto-refresh."""

    def testSetCacheImages(self):
        """Basic set/get/clear of named cache images."""
        cache = QgsMapRendererCache()
        # not set image
        im = cache.cacheImage('littlehands')
        self.assertTrue(im.isNull())
        self.assertFalse(cache.hasCacheImage('littlehands'))

        # set image
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('littlehands', im)
        self.assertFalse(im.isNull())
        self.assertEqual(cache.cacheImage('littlehands'), im)
        self.assertTrue(cache.hasCacheImage('littlehands'))

        # test another not set image when cache has images
        self.assertTrue(cache.cacheImage('bad').isNull())
        self.assertFalse(cache.hasCacheImage('bad'))

        # clear cache image
        cache.clearCacheImage('not in cache')  # no crash!
        cache.clearCacheImage('littlehands')
        im = cache.cacheImage('littlehands')
        self.assertTrue(im.isNull())
        self.assertFalse(cache.hasCacheImage('littlehands'))

        # clear whole cache
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('littlehands', im)
        self.assertFalse(im.isNull())
        self.assertTrue(cache.hasCacheImage('littlehands'))
        cache.clear()
        im = cache.cacheImage('littlehands')
        self.assertTrue(im.isNull())
        self.assertFalse(cache.hasCacheImage('littlehands'))

    def testInit(self):
        """init() keeps the cache only while extent and scale are unchanged."""
        cache = QgsMapRendererCache()
        extent = QgsRectangle(1, 2, 3, 4)
        self.assertFalse(cache.init(extent, 1000))

        # add a cache image
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('layer', im)
        self.assertFalse(cache.cacheImage('layer').isNull())
        self.assertTrue(cache.hasCacheImage('layer'))

        # re init, without changing extent or scale
        self.assertTrue(cache.init(extent, 1000))
        # image should still be in cache
        self.assertFalse(cache.cacheImage('layer').isNull())
        self.assertTrue(cache.hasCacheImage('layer'))

        # reinit with different scale
        self.assertFalse(cache.init(extent, 2000))
        # cache should be cleared
        self.assertTrue(cache.cacheImage('layer').isNull())
        self.assertFalse(cache.hasCacheImage('layer'))

        # readd image to cache
        cache.setCacheImage('layer', im)
        self.assertFalse(cache.cacheImage('layer').isNull())
        self.assertTrue(cache.hasCacheImage('layer'))

        # change extent
        self.assertFalse(cache.init(QgsRectangle(11, 12, 13, 14), 2000))
        # cache should be cleared
        self.assertTrue(cache.cacheImage('layer').isNull())
        self.assertFalse(cache.hasCacheImage('layer'))

    def testRequestRepaintSimple(self):
        """ test requesting repaint with a single dependent layer """
        layer = QgsVectorLayer("Point?field=fldtxt:string",
                               "layer", "memory")
        QgsProject.instance().addMapLayers([layer])
        self.assertTrue(layer.isValid())

        # add image to cache
        cache = QgsMapRendererCache()
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('xxx', im, [layer])
        self.assertFalse(cache.cacheImage('xxx').isNull())
        self.assertTrue(cache.hasCacheImage('xxx'))

        # trigger repaint on layer
        layer.triggerRepaint()
        # cache image should be cleared
        self.assertTrue(cache.cacheImage('xxx').isNull())
        self.assertFalse(cache.hasCacheImage('xxx'))
        QgsProject.instance().removeMapLayer(layer.id())

        # test that cache is also cleared on deferred update
        layer = QgsVectorLayer("Point?field=fldtxt:string",
                               "layer", "memory")
        cache.setCacheImage('xxx', im, [layer])
        layer.triggerRepaint(True)
        self.assertFalse(cache.hasCacheImage('xxx'))

    def testInvalidateCacheForLayer(self):
        """ test invalidating the cache for a layer """
        layer = QgsVectorLayer("Point?field=fldtxt:string",
                               "layer", "memory")
        QgsProject.instance().addMapLayers([layer])
        self.assertTrue(layer.isValid())

        # add image to cache
        cache = QgsMapRendererCache()
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('xxx', im, [layer])
        self.assertFalse(cache.cacheImage('xxx').isNull())
        self.assertTrue(cache.hasCacheImage('xxx'))

        # invalidate cache for layer
        cache.invalidateCacheForLayer(layer)
        # cache image should be cleared
        self.assertTrue(cache.cacheImage('xxx').isNull())
        self.assertFalse(cache.hasCacheImage('xxx'))
        QgsProject.instance().removeMapLayer(layer.id())

    def testRequestRepaintMultiple(self):
        """ test requesting repaint with multiple dependent layers """
        layer1 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer1", "memory")
        layer2 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer2", "memory")
        QgsProject.instance().addMapLayers([layer1, layer2])
        self.assertTrue(layer1.isValid())
        self.assertTrue(layer2.isValid())

        # add image to cache - no dependent layers
        cache = QgsMapRendererCache()
        im1 = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('nolayer', im1)
        self.assertFalse(cache.cacheImage('nolayer').isNull())
        self.assertTrue(cache.hasCacheImage('nolayer'))

        # trigger repaint on layer
        layer1.triggerRepaint()
        layer1.triggerRepaint()  # do this a couple of times - we don't want errors due to multiple disconnects, etc
        layer2.triggerRepaint()
        layer2.triggerRepaint()
        # cache image should still exist - it's not dependent on layers
        self.assertFalse(cache.cacheImage('nolayer').isNull())
        self.assertTrue(cache.hasCacheImage('nolayer'))

        # image depends on 1 layer
        im_l1 = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('im1', im_l1, [layer1])

        # image depends on 2 layers
        im_l1_l2 = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('im1_im2', im_l1_l2, [layer1, layer2])

        # image depends on 2nd layer alone
        im_l2 = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('im2', im_l2, [layer2])

        self.assertFalse(cache.cacheImage('im1').isNull())
        self.assertTrue(cache.hasCacheImage('im1'))
        self.assertFalse(cache.cacheImage('im1_im2').isNull())
        self.assertTrue(cache.hasCacheImage('im1_im2'))
        self.assertFalse(cache.cacheImage('im2').isNull())
        self.assertTrue(cache.hasCacheImage('im2'))

        # trigger repaint layer 1 (check twice - don't want disconnect errors)
        for i in range(2):
            layer1.triggerRepaint()
            # should be cleared
            self.assertTrue(cache.cacheImage('im1').isNull())
            self.assertFalse(cache.hasCacheImage('im1'))
            self.assertTrue(cache.cacheImage('im1_im2').isNull())
            self.assertFalse(cache.hasCacheImage('im1_im2'))
            # should be retained
            self.assertTrue(cache.hasCacheImage('im2'))
            self.assertFalse(cache.cacheImage('im2').isNull())
            self.assertEqual(cache.cacheImage('im2'), im_l2)
            self.assertTrue(cache.hasCacheImage('nolayer'))
            self.assertFalse(cache.cacheImage('nolayer').isNull())
            self.assertEqual(cache.cacheImage('nolayer'), im1)

        # trigger repaint layer 2
        for i in range(2):
            layer2.triggerRepaint()
            # should be cleared
            self.assertFalse(cache.hasCacheImage('im1'))
            self.assertTrue(cache.cacheImage('im1').isNull())
            self.assertFalse(cache.hasCacheImage('im1_im2'))
            self.assertTrue(cache.cacheImage('im1_im2').isNull())
            self.assertFalse(cache.hasCacheImage('im2'))
            self.assertTrue(cache.cacheImage('im2').isNull())
            # should be retained
            self.assertTrue(cache.hasCacheImage('nolayer'))
            self.assertFalse(cache.cacheImage('nolayer').isNull())
            self.assertEqual(cache.cacheImage('nolayer'), im1)

    def testDependentLayers(self):
        """dependentLayers() reports the layers a cached image depends on."""
        # bad layer tests
        cache = QgsMapRendererCache()
        self.assertEqual(cache.dependentLayers('not a layer'), [])

        layer1 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer1", "memory")
        layer2 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer2", "memory")
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('no depends', im, [])
        self.assertEqual(cache.dependentLayers('no depends'), [])
        cache.setCacheImage('depends', im, [layer1, layer2])
        self.assertEqual(set(cache.dependentLayers('depends')), set([layer1, layer2]))

    def testLayerRemoval(self):
        """test that cached image is cleared when a dependent layer is removed"""
        cache = QgsMapRendererCache()
        layer1 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer1", "memory")
        layer2 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer2", "memory")
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('depends', im, [layer1, layer2])
        cache.setCacheImage('depends2', im, [layer1])
        cache.setCacheImage('depends3', im, [layer2])
        cache.setCacheImage('no depends', im, [])
        self.assertTrue(cache.hasCacheImage('depends'))
        self.assertTrue(cache.hasCacheImage('depends2'))
        self.assertTrue(cache.hasCacheImage('depends3'))
        self.assertTrue(cache.hasCacheImage('no depends'))

        # try deleting a layer (drops the C++ object; cache must notice)
        layer2 = None
        self.assertFalse(cache.hasCacheImage('depends'))
        self.assertTrue(cache.hasCacheImage('depends2'))
        self.assertFalse(cache.hasCacheImage('depends3'))
        self.assertTrue(cache.hasCacheImage('no depends'))

        layer1 = None
        self.assertFalse(cache.hasCacheImage('depends'))
        self.assertFalse(cache.hasCacheImage('depends2'))
        self.assertFalse(cache.hasCacheImage('depends3'))
        self.assertTrue(cache.hasCacheImage('no depends'))

    def testClearOnLayerAutoRefresh(self):
        """ test that cache is cleared when layer auto refresh is triggered """
        cache = QgsMapRendererCache()
        layer1 = QgsVectorLayer("Point?field=fldtxt:string",
                                "layer1", "memory")
        im = QImage(200, 200, QImage.Format_RGB32)
        cache.setCacheImage('l1', im, [layer1])
        self.assertTrue(cache.hasCacheImage('l1'))

        layer1.setAutoRefreshInterval(100)
        layer1.setAutoRefreshEnabled(True)
        self.assertTrue(cache.hasCacheImage('l1'))

        # wait a second...
        sleep(1)
        QCoreApplication.processEvents()
        # cache should be cleared
        self.assertFalse(cache.hasCacheImage('l1'))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| gpl-2.0 |
UManPychron/pychron | docs/user_guide/operation/scripts/examples/argus/measurement/jan_unknown90_20_no_center.py | 2 | 2277 | #!Measurement
'''
baseline:
after: true
before: false
counts: 20
detector: H1
mass: 34.2
settling_time: 15.0
default_fits: nominal_linear
equilibration:
eqtime: 1.0
inlet: R
inlet_delay: 3
outlet: O
use_extraction_eqtime: true
multicollect:
counts: 90
detector: H1
isotope: Ar40
peakcenter:
after: false
before: false
detector: H1
detectors:
- H1
- AX
- CDD
isotope: Ar40
integration_time: 0.262144
peakhop:
hops_name: ''
use_peak_hop: false
'''
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2','CDD')
def main():
    """Measurement sequence: optional pre-run peak center/baselines,
    equilibration, multicollection, then optional post-run baselines,
    peak center and CDD warming.

    NOTE(review): info, activate_detectors, peak_center, baselines,
    position_magnet, equilibrate, set_time_zero, sniff, set_fits,
    set_baseline_fits, multicollect, gosub and the names mx, eqtime,
    use_cdd_warming are injected into this script's namespace by the
    pychron pyscript runtime -- they are not defined in this file.
    """
    info('unknown measurement script')
    activate_detectors(*ACTIVE_DETECTORS)

    if mx.peakcenter.before:
        peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)

    if mx.baseline.before:
        baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
                  settling_time=mx.baseline.settling_time)
    position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)

    #sniff the gas during equilibration
    if mx.equilibration.use_extraction_eqtime:
        eqt = eqtime
    else:
        eqt = mx.equilibration.eqtime
    '''
    Equilibrate is non-blocking so use a sniff or sleep as a placeholder
    e.g sniff(<equilibration_time>) or sleep(<equilibration_time>)
    '''
    equilibrate(eqtime=eqt, inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet,
                delay=mx.equilibration.inlet_delay)
    set_time_zero()

    sniff(eqt)
    set_fits()
    set_baseline_fits()

    #multicollect on active detectors
    multicollect(ncounts=mx.multicollect.counts, integration_time=1)

    if mx.baseline.after:
        baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector,
                  settling_time=mx.baseline.settling_time)
    if mx.peakcenter.after:
        activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
        peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope,
                    integration_time=mx.peakcenter.integration_time)

    if use_cdd_warming:
        gosub('warm_cdd', argv=(mx.equilibration.outlet,))

    info('finished measure script')
| apache-2.0 |
maiorBoltach/bf2142stats_emu | python/bf2/stats/rank.py | 2 | 2013 | # rank upgrades
import host
import bf2.PlayerManager
import bf2.Timer
from bf2.stats.constants import *
from bf2 import g_debug
def init():
    """Register the engine callbacks that keep player ranks in sync.

    Also re-runs onPlayerConnect for players that are already connected,
    so a live re-initialization of the module picks them up.
    """
    # Events
    #if g_debug: print "getUseGlobalRank = %s" % str(bf2.serverSettings.getUseGlobalRank())
    #if bf2.serverSettings.getUseGlobalRank():
    host.registerHandler('PlayerConnect', onPlayerConnect, 1)
    host.registerHandler('PlayerStatsResponse', onStatsResponse, 1)
    host.registerGameStatusHandler(onGameStatusChanged)

    # Connect already connected players if reinitializing
    for p in bf2.playerManager.getPlayers():
        onPlayerConnect(p)

    if g_debug: print "rank.py[24]: Rank module initialized"
def onGameStatusChanged(status):
    """Engine callback for game-state transitions.

    Only the debug branch does anything here; the comment suggests this is
    a leftover from a removed periodic-update timer.
    """
    if status == bf2.GameStatus.Playing:
        pass
    else:
        if g_debug: print "rank.py[32]: Destroyed timer"
def onUpdate(data):
    """Periodic rank check over all alive players.

    NOTE(review): nothing in this module registers onUpdate, and checkRank
    is not defined in this file -- confirm both are provided elsewhere
    before relying on this callback.
    """
    for p in bf2.playerManager.getPlayers():
        if g_debug: print "rank.py[38]: checkRank"
        if p.isAlive():
            checkRank(p)
### Event hooks
def onPlayerConnect(player):
    """Initialize a connecting player's rank and request the stored value.

    Players without a profile id (id <= 0) cannot have persisted stats, so
    no request is issued for them.
    """
    #id = player.index
    if player.score.rank == -1:
        player.score.rank = 0
    # request rank
    #if bf2.serverSettings.getUseGlobalRank():
    if player.getProfileId() > 0:
        success = host.pers_plrRequestStats(player.index, 1, "&mode=base", 0)
    else:
        if g_debug: print "rank.py[55]: Player %d had no profile id, can't request rank" % player.index
    if g_debug: print "rank.py[58]: Added player %d to rank checking" % (player.index)
def onStatsResponse(succeeded, player, stats):
if player == None:
playerIndex = "unknown"
else:
playerIndex = player.index
if not "rnk" in stats:
if g_debug: print "rank.py[69]: rank not found, aborting"
return
if g_debug: print "rank.py[72]: Rank received for player ", playerIndex, ":", host.pers_getStatsKeyVal("rnk", player.getProfileId())
if not player: return
value = int( host.pers_getStatsKeyVal("rnk", player.getProfileId()) )
if g_debug: print "rank.py[77]: Player",player.index,"Rank:", value
player.score.rank = value
player.stats.rank = value
| gpl-2.0 |
nkhuyu/airflow | airflow/operators/postgres_operator.py | 14 | 1225 | import logging
from airflow.hooks import PostgresHook
from airflow.models import BaseOperator
from airflow.utils import apply_defaults
class PostgresOperator(BaseOperator):
    """Run SQL statements against a specific Postgres database.

    :param sql: a single SQL statement, a list of statements, or a
        reference to a template file (recognised by a ``.sql`` suffix;
        the ``sql`` field is templated).
    :type sql: string or list of strings
    :param postgres_conn_id: connection id of the target Postgres database
    :type postgres_conn_id: string
    """

    template_fields = ('sql',)
    template_ext = ('.sql',)
    ui_color = '#ededed'

    @apply_defaults
    def __init__(self, sql,
                 postgres_conn_id='postgres_default', autocommit=False,
                 *args, **kwargs):
        super(PostgresOperator, self).__init__(*args, **kwargs)
        self.postgres_conn_id = postgres_conn_id
        self.autocommit = autocommit
        self.sql = sql

    def execute(self, context):
        logging.info('Executing: ' + str(self.sql))
        hook = PostgresHook(postgres_conn_id=self.postgres_conn_id)
        # Keep the hook on the instance (matches the original behaviour).
        self.hook = hook
        hook.run(self.sql, self.autocommit)
| apache-2.0 |
bzero/statsmodels | statsmodels/regression/tests/test_glsar_gretl.py | 25 | 25907 | # -*- coding: utf-8 -*-
"""Tests of GLSAR and diagnostics against Gretl
Created on Thu Feb 02 21:15:47 2012
Author: Josef Perktold
License: BSD-3
"""
import os
import numpy as np
from numpy.testing import (assert_almost_equal, assert_equal,
assert_approx_equal, assert_array_less)
from statsmodels.regression.linear_model import OLS, GLSAR
from statsmodels.tools.tools import add_constant
from statsmodels.datasets import macrodata
import statsmodels.stats.sandwich_covariance as sw
import statsmodels.stats.diagnostic as smsdia
#import statsmodels.sandbox.stats.diagnostic as smsdia
import statsmodels.stats.outliers_influence as oi
def compare_ftest(contrast_res, other, decimal=(5,4)):
    """Assert that an F-test contrast result matches reference values.

    *other* is a tuple ``(fvalue, pvalue, df_num, df_denom, "f")``; the
    first two entries are compared approximately at the precisions given by
    *decimal*, the remaining entries exactly.
    """
    dec_f, dec_p = decimal
    assert_almost_equal(contrast_res.fvalue, other[0], decimal=dec_f)
    assert_almost_equal(contrast_res.pvalue, other[1], decimal=dec_p)
    assert_equal(contrast_res.df_num, other[2])
    assert_equal(contrast_res.df_denom, other[3])
    assert_equal("f", other[4])
class TestGLSARGretl(object):
    """Compare GLSAR and OLS estimates plus diagnostic statistics for the
    macrodata investment equation against reference output from Gretl.

    The large triple-quoted strings inside test_all are verbatim Gretl
    session output kept as documentation of the reference numbers.
    """

    def test_all(self):
        """Fit the investment growth equation and check parameters, fit
        statistics and specification tests against Gretl reference values,
        first for GLSAR (Cochrane-Orcutt) and then for OLS with HAC errors."""

        d = macrodata.load().data
        #import datasetswsm.greene as g
        #d = g.load('5-1')

        #growth rates
        gs_l_realinv = 400 * np.diff(np.log(d['realinv']))
        gs_l_realgdp = 400 * np.diff(np.log(d['realgdp']))

        #simple diff, not growthrate, I want heteroscedasticity later for testing
        endogd = np.diff(d['realinv'])
        exogd = add_constant(np.c_[np.diff(d['realgdp']), d['realint'][:-1]])

        endogg = gs_l_realinv
        exogg = add_constant(np.c_[gs_l_realgdp, d['realint'][:-1]])

        res_ols = OLS(endogg, exogg).fit()
        #print res_ols.params

        mod_g1 = GLSAR(endogg, exogg, rho=-0.108136)
        res_g1 = mod_g1.fit()
        #print res_g1.params

        mod_g2 = GLSAR(endogg, exogg, rho=-0.108136)  #-0.1335859) from R
        res_g2 = mod_g2.iterative_fit(maxiter=5)
        #print res_g2.params

        rho = -0.108136

        # coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
        partable = np.array([
            [-9.50990, 0.990456, -9.602, 3.65e-018, -11.4631, -7.55670],  # ***
            [4.37040, 0.208146, 21.00, 2.93e-052, 3.95993, 4.78086],  # ***
            [-0.579253, 0.268009, -2.161, 0.0319, -1.10777, -0.0507346]])  # **

        #Statistics based on the rho-differenced data:

        result_gretl_g1 = dict(
            endog_mean=("Mean dependent var", 3.113973),
            endog_std=("S.D. dependent var", 18.67447),
            ssr=("Sum squared resid", 22530.90),
            mse_resid_sqrt=("S.E. of regression", 10.66735),
            rsquared=("R-squared", 0.676973),
            rsquared_adj=("Adjusted R-squared", 0.673710),
            fvalue=("F(2, 198)", 221.0475),
            f_pvalue=("P-value(F)", 3.56e-51),
            resid_acf1=("rho", -0.003481),
            dw=("Durbin-Watson", 1.993858))

        #fstatistic, p-value, df1, df2
        reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
        reset_2 = [7.268492, 0.00762, 1, 198, "f"]
        reset_3 = [5.248951, 0.023, 1, 198, "f"]
        #LM-statistic, p-value, df
        arch_4 = [7.30776, 0.120491, 4, "chi2"]

        #multicollinearity
        vif = [1.002, 1.002]
        cond_1norm = 6862.0664
        determinant = 1.0296049e+009
        reciprocal_condition_number = 0.013819244

        #Chi-square(2): test-statistic, pvalue, df
        normality = [20.2792, 3.94837e-005, 2]

        #tests
        res = res_g1  #with rho from Gretl

        #basic
        assert_almost_equal(res.params, partable[:,0], 4)
        assert_almost_equal(res.bse, partable[:,1], 6)
        assert_almost_equal(res.tvalues, partable[:,2], 2)

        assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
        #assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
        #assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
        #assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
        assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
        assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=4)
        assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=2)
        #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

        #arch
        #sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
        sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=4)
        assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)

        #tests
        res = res_g2  #with estimated rho

        #estimated lag coefficient
        assert_almost_equal(res.model.rho, rho, decimal=3)

        #basic
        assert_almost_equal(res.params, partable[:,0], 4)
        assert_almost_equal(res.bse, partable[:,1], 3)
        assert_almost_equal(res.tvalues, partable[:,2], 2)

        assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
        #assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=7) #not in gretl
        #assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=7) #FAIL
        #assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=7) #FAIL
        assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
        assert_almost_equal(res.fvalue, result_gretl_g1['fvalue'][1], decimal=0)
        assert_almost_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], decimal=6)
        #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

        c = oi.reset_ramsey(res, degree=2)
        compare_ftest(c, reset_2, decimal=(2,4))
        c = oi.reset_ramsey(res, degree=3)
        compare_ftest(c, reset_2_3, decimal=(2,4))

        #arch
        #sm_arch = smsdia.acorr_lm(res.wresid**2, maxlag=4, autolag=None)
        sm_arch = smsdia.het_arch(res.wresid, maxlag=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=1)
        assert_almost_equal(sm_arch[1], arch_4[1], decimal=2)

        '''
        Performing iterative calculation of rho...

                         ITER       RHO        ESS
                           1     -0.10734   22530.9
                           2     -0.10814   22530.9

        Model 4: Cochrane-Orcutt, using observations 1959:3-2009:3 (T = 201)
        Dependent variable: ds_l_realinv
        rho = -0.108136

                         coefficient   std. error   t-ratio    p-value
          -------------------------------------------------------------
          const           -9.50990      0.990456    -9.602    3.65e-018 ***
          ds_l_realgdp     4.37040      0.208146    21.00     2.93e-052 ***
          realint_1       -0.579253     0.268009    -2.161    0.0319    **

        Statistics based on the rho-differenced data:

        Mean dependent var   3.113973   S.D. dependent var   18.67447
        Sum squared resid    22530.90   S.E. of regression   10.66735
        R-squared            0.676973   Adjusted R-squared   0.673710
        F(2, 198)            221.0475   P-value(F)           3.56e-51
        rho                 -0.003481   Durbin-Watson        1.993858
        '''

        '''
        RESET test for specification (squares and cubes)
        Test statistic: F = 5.219019,
        with p-value = P(F(2,197) > 5.21902) = 0.00619

        RESET test for specification (squares only)
        Test statistic: F = 7.268492,
        with p-value = P(F(1,198) > 7.26849) = 0.00762

        RESET test for specification (cubes only)
        Test statistic: F = 5.248951,
        with p-value = P(F(1,198) > 5.24895) = 0.023:
        '''

        '''
        Test for ARCH of order 4

                     coefficient   std. error   t-ratio   p-value
          --------------------------------------------------------
          alpha(0)   97.0386       20.3234       4.775    3.56e-06 ***
          alpha(1)    0.176114      0.0714698    2.464    0.0146   **
          alpha(2)   -0.0488339     0.0724981   -0.6736   0.5014
          alpha(3)   -0.0705413     0.0737058   -0.9571   0.3397
          alpha(4)    0.0384531     0.0725763    0.5298   0.5968

          Null hypothesis: no ARCH effect is present
          Test statistic: LM = 7.30776
          with p-value = P(Chi-square(4) > 7.30776) = 0.120491:
        '''

        '''
        Variance Inflation Factors

        Minimum possible value = 1.0
        Values > 10.0 may indicate a collinearity problem

           ds_l_realgdp    1.002
              realint_1    1.002

        VIF(j) = 1/(1 - R(j)^2), where R(j) is the multiple correlation coefficient
        between variable j and the other independent variables

        Properties of matrix X'X:

         1-norm = 6862.0664
         Determinant = 1.0296049e+009
         Reciprocal condition number = 0.013819244
        '''

        '''
        Test for ARCH of order 4 -
          Null hypothesis: no ARCH effect is present
          Test statistic: LM = 7.30776
          with p-value = P(Chi-square(4) > 7.30776) = 0.120491

        Test of common factor restriction -
          Null hypothesis: restriction is acceptable
          Test statistic: F(2, 195) = 0.426391
          with p-value = P(F(2, 195) > 0.426391) = 0.653468

        Test for normality of residual -
          Null hypothesis: error is normally distributed
          Test statistic: Chi-square(2) = 20.2792
          with p-value = 3.94837e-005:
        '''

        #no idea what this is
        '''
        Augmented regression for common factor test
        OLS, using observations 1959:3-2009:3 (T = 201)
        Dependent variable: ds_l_realinv

                           coefficient   std. error   t-ratio    p-value
          ---------------------------------------------------------------
          const            -10.9481      1.35807      -8.062    7.44e-014 ***
          ds_l_realgdp       4.28893     0.229459     18.69     2.40e-045 ***
          realint_1         -0.662644    0.334872     -1.979    0.0492    **
          ds_l_realinv_1    -0.108892    0.0715042    -1.523    0.1294
          ds_l_realgdp_1     0.660443    0.390372      1.692    0.0923    *
          realint_2          0.0769695   0.341527      0.2254   0.8219

          Sum of squared residuals = 22432.8

        Test of common factor restriction

          Test statistic: F(2, 195) = 0.426391, with p-value = 0.653468
        '''

        ################ with OLS, HAC errors

        #Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
        #Dependent variable: ds_l_realinv
        #HAC standard errors, bandwidth 4 (Bartlett kernel)

        #coefficient std. error t-ratio p-value 95% CONFIDENCE INTERVAL
        #for confidence interval t(199, 0.025) = 1.972

        partable = np.array([
            [-9.48167, 1.17709, -8.055, 7.17e-014, -11.8029, -7.16049],  # ***
            [4.37422, 0.328787, 13.30, 2.62e-029, 3.72587, 5.02258],  #***
            [-0.613997, 0.293619, -2.091, 0.0378, -1.19300, -0.0349939]])  # **

        result_gretl_g1 = dict(
            endog_mean=("Mean dependent var", 3.257395),
            endog_std=("S.D. dependent var", 18.73915),
            ssr=("Sum squared resid", 22799.68),
            mse_resid_sqrt=("S.E. of regression", 10.70380),
            rsquared=("R-squared", 0.676978),
            rsquared_adj=("Adjusted R-squared", 0.673731),
            fvalue=("F(2, 199)", 90.79971),
            f_pvalue=("P-value(F)", 9.53e-29),
            llf=("Log-likelihood", -763.9752),
            aic=("Akaike criterion", 1533.950),
            bic=("Schwarz criterion", 1543.875),
            hqic=("Hannan-Quinn", 1537.966),
            resid_acf1=("rho", -0.107341),
            dw=("Durbin-Watson", 2.213805))

        linear_logs = [1.68351, 0.430953, 2, "chi2"]
        #for logs: dropping 70 nan or incomplete observations, T=133
        #(res_ols.model.exog <=0).any(1).sum() = 69  ?not 70
        linear_squares = [7.52477, 0.0232283, 2, "chi2"]

        #Autocorrelation, Breusch-Godfrey test for autocorrelation up to order 4
        lm_acorr4 = [1.17928, 0.321197, 4, 195, "F"]
        lm2_acorr4 = [4.771043, 0.312, 4, "chi2"]
        acorr_ljungbox4 = [5.23587, 0.264, 4, "chi2"]

        #break
        cusum_Harvey_Collier = [0.494432, 0.621549, 198, "t"]  #stats.t.sf(0.494432, 198)*2
        #see cusum results in files
        break_qlr = [3.01985, 0.1, 3, 196, "maxF"]  #TODO check this, max at 2001:4
        break_chow = [13.1897, 0.00424384, 3, "chi2"]  # break at 1984:1

        arch_4 = [3.43473, 0.487871, 4, "chi2"]

        normality = [23.962, 0.00001, 2, "chi2"]

        het_white = [33.503723, 0.000003, 5, "chi2"]
        het_breusch_pagan = [1.302014, 0.521520, 2, "chi2"]  #TODO: not available
        het_breusch_pagan_konker = [0.709924, 0.701200, 2, "chi2"]

        reset_2_3 = [5.219019, 0.00619, 2, 197, "f"]
        reset_2 = [7.268492, 0.00762, 1, 198, "f"]
        reset_3 = [5.248951, 0.023, 1, 198, "f"]  #not available

        cond_1norm = 5984.0525
        determinant = 7.1087467e+008
        reciprocal_condition_number = 0.013826504
        vif = [1.001, 1.001]

        names = 'date residual leverage influence DFFITS'.split()
        cur_dir = os.path.abspath(os.path.dirname(__file__))
        fpath = os.path.join(cur_dir, 'results/leverage_influence_ols_nostars.txt')
        lev = np.genfromtxt(fpath, skip_header=3, skip_footer=1,
                            converters={0:lambda s: s})
        #either numpy 1.6 or python 3.2 changed behavior
        if np.isnan(lev[-1]['f1']):
            lev = np.genfromtxt(fpath, skip_header=3, skip_footer=2,
                                converters={0:lambda s: s})

        lev.dtype.names = names

        res = res_ols  #for easier copying

        cov_hac = sw.cov_hac_simple(res, nlags=4, use_correction=False)
        bse_hac = sw.se_cov(cov_hac)

        assert_almost_equal(res.params, partable[:,0], 5)
        assert_almost_equal(bse_hac, partable[:,1], 5)
        #TODO
        assert_almost_equal(res.ssr, result_gretl_g1['ssr'][1], decimal=2)
        assert_almost_equal(res.llf, result_gretl_g1['llf'][1], decimal=4)  #not in gretl
        assert_almost_equal(res.rsquared, result_gretl_g1['rsquared'][1], decimal=6)  #FAIL
        assert_almost_equal(res.rsquared_adj, result_gretl_g1['rsquared_adj'][1], decimal=6)  #FAIL
        assert_almost_equal(np.sqrt(res.mse_resid), result_gretl_g1['mse_resid_sqrt'][1], decimal=5)
        #f-value is based on cov_hac I guess
        #res2 = res.get_robustcov_results(cov_type='HC1')
        # TODO: fvalue differs from Gretl, trying any of the HCx
        #assert_almost_equal(res2.fvalue, result_gretl_g1['fvalue'][1], decimal=0) #FAIL
        #assert_approx_equal(res.f_pvalue, result_gretl_g1['f_pvalue'][1], significant=1) #FAIL
        #assert_almost_equal(res.durbin_watson, result_gretl_g1['dw'][1], decimal=7) #TODO

        c = oi.reset_ramsey(res, degree=2)
        compare_ftest(c, reset_2, decimal=(6,5))
        c = oi.reset_ramsey(res, degree=3)
        compare_ftest(c, reset_2_3, decimal=(6,5))

        linear_sq = smsdia.linear_lm(res.resid, res.model.exog)
        assert_almost_equal(linear_sq[0], linear_squares[0], decimal=6)
        assert_almost_equal(linear_sq[1], linear_squares[1], decimal=7)

        hbpk = smsdia.het_breuschpagan(res.resid, res.model.exog)
        assert_almost_equal(hbpk[0], het_breusch_pagan_konker[0], decimal=6)
        assert_almost_equal(hbpk[1], het_breusch_pagan_konker[1], decimal=6)

        hw = smsdia.het_white(res.resid, res.model.exog)
        assert_almost_equal(hw[:2], het_white[:2], 6)

        #arch
        #sm_arch = smsdia.acorr_lm(res.resid**2, maxlag=4, autolag=None)
        sm_arch = smsdia.het_arch(res.resid, maxlag=4)
        assert_almost_equal(sm_arch[0], arch_4[0], decimal=5)
        assert_almost_equal(sm_arch[1], arch_4[1], decimal=6)

        vif2 = [oi.variance_inflation_factor(res.model.exog, k) for k in [1,2]]

        infl = oi.OLSInfluence(res_ols)
        #print np.max(np.abs(lev['DFFITS'] - infl.dffits[0]))
        #print np.max(np.abs(lev['leverage'] - infl.hat_matrix_diag))
        #print np.max(np.abs(lev['influence'] - infl.influence))  #just added this based on Gretl

        #just rough test, low decimal in Gretl output,
        assert_almost_equal(lev['residual'], res.resid, decimal=3)
        assert_almost_equal(lev['DFFITS'], infl.dffits[0], decimal=3)
        assert_almost_equal(lev['leverage'], infl.hat_matrix_diag, decimal=3)
        assert_almost_equal(lev['influence'], infl.influence, decimal=4)
def test_GLSARlag():
    """Check that a GLSAR(4) fit stays close to the GLSAR(1) fit on the
    macro investment data while achieving a strictly smaller SSR."""
    from statsmodels.datasets import macrodata
    data = macrodata.load().data

    growth_gdp = 400 * np.diff(np.log(data['realgdp']))
    growth_inv = 400 * np.diff(np.log(data['realinv']))
    regressors = add_constant(np.c_[growth_gdp, data['realint'][:-1]],
                              prepend=False)

    model_lag1 = GLSAR(growth_inv, regressors, 1)
    fit_lag1 = model_lag1.iterative_fit(5)
    model_lag4 = GLSAR(growth_inv, regressors, 4)
    fit_lag4 = model_lag4.iterative_fit(10)

    # Parameters, standard errors and fitted values agree within a few
    # percent; the richer AR(4) error model must fit at least as well.
    assert_array_less(np.abs(fit_lag1.params / fit_lag4.params - 1), 0.03)
    assert_array_less(fit_lag4.ssr, fit_lag1.ssr)
    assert_array_less(np.abs(fit_lag4.bse / fit_lag1.bse) - 1, 0.015)
    assert_array_less(np.abs((fit_lag4.fittedvalues / fit_lag1.fittedvalues - 1).mean()),
                      0.015)
    assert_equal(len(model_lag4.rho), 4)
# Run the Gretl comparison directly (outside a test runner).
if __name__ == '__main__':
    t = TestGLSARGretl()
    t.test_all()
'''
Model 5: OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: ds_l_realinv
HAC standard errors, bandwidth 4 (Bartlett kernel)
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const -9.48167 1.17709 -8.055 7.17e-014 ***
ds_l_realgdp 4.37422 0.328787 13.30 2.62e-029 ***
realint_1 -0.613997 0.293619 -2.091 0.0378 **
Mean dependent var 3.257395 S.D. dependent var 18.73915
Sum squared resid 22799.68 S.E. of regression 10.70380
R-squared 0.676978 Adjusted R-squared 0.673731
F(2, 199) 90.79971 P-value(F) 9.53e-29
Log-likelihood -763.9752 Akaike criterion 1533.950
Schwarz criterion 1543.875 Hannan-Quinn 1537.966
rho -0.107341 Durbin-Watson 2.213805
QLR test for structural break -
Null hypothesis: no structural break
Test statistic: max F(3, 196) = 3.01985 at observation 2001:4
(10 percent critical value = 4.09)
Non-linearity test (logs) -
Null hypothesis: relationship is linear
Test statistic: LM = 1.68351
with p-value = P(Chi-square(2) > 1.68351) = 0.430953
Non-linearity test (squares) -
Null hypothesis: relationship is linear
Test statistic: LM = 7.52477
with p-value = P(Chi-square(2) > 7.52477) = 0.0232283
LM test for autocorrelation up to order 4 -
Null hypothesis: no autocorrelation
Test statistic: LMF = 1.17928
with p-value = P(F(4,195) > 1.17928) = 0.321197
CUSUM test for parameter stability -
Null hypothesis: no change in parameters
Test statistic: Harvey-Collier t(198) = 0.494432
with p-value = P(t(198) > 0.494432) = 0.621549
Chow test for structural break at observation 1984:1 -
Null hypothesis: no structural break
Asymptotic test statistic: Chi-square(3) = 13.1897
with p-value = 0.00424384
Test for ARCH of order 4 -
Null hypothesis: no ARCH effect is present
Test statistic: LM = 3.43473
with p-value = P(Chi-square(4) > 3.43473) = 0.487871:
#ANOVA
Analysis of Variance:
Sum of squares df Mean square
Regression 47782.7 2 23891.3
Residual 22799.7 199 114.571
Total 70582.3 201 351.156
R^2 = 47782.7 / 70582.3 = 0.676978
F(2, 199) = 23891.3 / 114.571 = 208.528 [p-value 1.47e-049]
#LM-test autocorrelation
Breusch-Godfrey test for autocorrelation up to order 4
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 0.0640964 1.06719 0.06006 0.9522
ds_l_realgdp -0.0456010 0.217377 -0.2098 0.8341
realint_1 0.0511769 0.293136 0.1746 0.8616
uhat_1 -0.104707 0.0719948 -1.454 0.1475
uhat_2 -0.00898483 0.0742817 -0.1210 0.9039
uhat_3 0.0837332 0.0735015 1.139 0.2560
uhat_4 -0.0636242 0.0737363 -0.8629 0.3893
Unadjusted R-squared = 0.023619
Test statistic: LMF = 1.179281,
with p-value = P(F(4,195) > 1.17928) = 0.321
Alternative statistic: TR^2 = 4.771043,
with p-value = P(Chi-square(4) > 4.77104) = 0.312
Ljung-Box Q' = 5.23587,
with p-value = P(Chi-square(4) > 5.23587) = 0.264:
RESET test for specification (squares and cubes)
Test statistic: F = 5.219019,
with p-value = P(F(2,197) > 5.21902) = 0.00619
RESET test for specification (squares only)
Test statistic: F = 7.268492,
with p-value = P(F(1,198) > 7.26849) = 0.00762
RESET test for specification (cubes only)
Test statistic: F = 5.248951,
with p-value = P(F(1,198) > 5.24895) = 0.023
#heteroscedasticity White
White's test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 104.920 21.5848 4.861 2.39e-06 ***
ds_l_realgdp -29.7040 6.24983 -4.753 3.88e-06 ***
realint_1 -6.93102 6.95607 -0.9964 0.3203
sq_ds_l_realg 4.12054 0.684920 6.016 8.62e-09 ***
X2_X3 2.89685 1.38571 2.091 0.0379 **
sq_realint_1 0.662135 1.10919 0.5970 0.5512
Unadjusted R-squared = 0.165860
Test statistic: TR^2 = 33.503723,
with p-value = P(Chi-square(5) > 33.503723) = 0.000003:
#heteroscedasticity Breusch-Pagan (original)
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2
coefficient std. error t-ratio p-value
-------------------------------------------------------------
const 1.09468 0.192281 5.693 4.43e-08 ***
ds_l_realgdp -0.0323119 0.0386353 -0.8363 0.4040
realint_1 0.00410778 0.0512274 0.08019 0.9362
Explained sum of squares = 2.60403
Test statistic: LM = 1.302014,
with p-value = P(Chi-square(2) > 1.302014) = 0.521520
#heteroscedasticity Breusch-Pagan Koenker
Breusch-Pagan test for heteroskedasticity
OLS, using observations 1959:2-2009:3 (T = 202)
Dependent variable: scaled uhat^2 (Koenker robust variant)
coefficient std. error t-ratio p-value
------------------------------------------------------------
const 10.6870 21.7027 0.4924 0.6230
ds_l_realgdp -3.64704 4.36075 -0.8363 0.4040
realint_1 0.463643 5.78202 0.08019 0.9362
Explained sum of squares = 33174.2
Test statistic: LM = 0.709924,
with p-value = P(Chi-square(2) > 0.709924) = 0.701200
########## forecast
#forecast mean y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 2.946312 -22.987904 - -11.367905
2008:4 -27.665860 -36.294434 3.036851 -42.282972 - -30.305896
2009:1 -70.239280 -44.018178 4.007017 -51.919841 - -36.116516
2009:2 -27.024588 -12.284842 1.427414 -15.099640 - -9.470044
2009:3 8.078897 4.483669 1.315876 1.888819 - 7.078520
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
#forecast actual y
For 95% confidence intervals, t(199, 0.025) = 1.972
Obs ds_l_realinv prediction std. error 95% interval
2008:3 -7.134492 -17.177905 11.101892 -39.070353 - 4.714544
2008:4 -27.665860 -36.294434 11.126262 -58.234939 - -14.353928
2009:1 -70.239280 -44.018178 11.429236 -66.556135 - -21.480222
2009:2 -27.024588 -12.284842 10.798554 -33.579120 - 9.009436
2009:3 8.078897 4.483669 10.784377 -16.782652 - 25.749991
Forecast evaluation statistics
Mean Error -3.7387
Mean Squared Error 218.61
Root Mean Squared Error 14.785
Mean Absolute Error 12.646
Mean Percentage Error -7.1173
Mean Absolute Percentage Error -43.867
Theil's U 0.4365
Bias proportion, UM 0.06394
Regression proportion, UR 0.13557
Disturbance proportion, UD 0.80049
'''
| bsd-3-clause |
berendkleinhaneveld/Registrationshop | ui/transformations/TwoStepPicker.py | 1 | 13059 | """
TwoStepPicker
:Authors:
Berend Klein Haneveld
"""
from Picker import Picker
from core.decorators import overrides
from core.operations import Multiply
from core.operations import Add
from core.operations import ClosestPoints
from core.operations import Subtract
from core.operations import LineIntersectionWithTriangle
from core.operations import Length
from core.operations import Normalize
from core.vtkDrawing import CreateSphere
from core.vtkDrawing import CreateLine
from core.vtkDrawing import TransformWithMatrix
from vtk import vtkAssembly
from vtk import vtkProp3DFollower
from vtk import vtkMath
from vtk import vtkImageInterpolator
from PySide.QtCore import Signal
from PySide.QtCore import Slot
class TwoStepPicker(Picker):
    """
    TwoStepPicker

    Picker that lets the user pick a 3D point inside a volume in two steps:
    first a key press casts a ray through the volume and draws it as a line,
    then moving the mouse slides a locator along that line (optionally
    snapping to large intensity gradients when Shift is held) and a second
    key press commits the point, emitted via ``pickedLocation`` in the
    volume's local coordinates.
    """
    # Emitted with the picked point (local volume coordinates, list of 3 floats).
    pickedLocation = Signal(list)
    # Emitted with the locator's position along the ray as a ratio in [0, 1].
    locatorUpdated = Signal(float)

    def __init__(self):
        super(TwoStepPicker, self).__init__()
        # Props added to the main renderer / the overlay renderer, so they
        # can be removed again in cleanUpProps().
        self.props = []
        self.overlayProps = []
        self.lineActor = None
        self.sphereSource = None
        # Intensity samples along the current ray and their successive
        # absolute differences (used for Shift-snapping).
        self.samples = None
        self.sampleDiffs = None

    def setPropertiesWidget(self, widget):
        """Wire this picker to the two-step properties widget (histogram + buttons)."""
        self.propertiesWidget = widget.twoStepWidget
        self.propertiesWidget.histogramWidget.updatedPosition.connect(self.histogramUpdatedPosition)
        self.locatorUpdated.connect(self.propertiesWidget.histogramWidget.locatorUpdated)
        self.propertiesWidget.pickedPosition.connect(self.pickedPosition)
        self.pickedLocation.connect(self.propertiesWidget.pickedLocation)

    @overrides(Picker)
    def setWidget(self, widget):
        """Attach to a render widget and observe its mouse/keyboard events."""
        self.widget = widget
        self.AddObserver(self.widget.rwi, "MouseMoveEvent", self.mouseMove)
        self.AddObserver(self.widget.rwi, "KeyPressEvent", self.keyPress)

    @overrides(Picker)
    def cleanUp(self):
        super(TwoStepPicker, self).cleanUp()
        self.cleanUpProps()

    def cleanUpProps(self):
        """Remove every prop this picker added and reset the picking state."""
        for prop in self.props:
            self.widget.renderer.RemoveViewProp(prop)
        for prop in self.overlayProps:
            self.widget.rendererOverlay.RemoveViewProp(prop)
        self.props = []
        self.overlayProps = []
        self.lineActor = None
        self.sphereSource = None

    @Slot(float)
    def histogramUpdatedPosition(self, position):
        """Move the locator to the given ratio (0..1) along the current ray.

        Called when the user drags the position marker in the histogram widget.
        """
        if not self.lineActor:
            return
        lineSource = self.lineActor.GetMapper().GetInputConnection(0, 0).GetProducer()
        p1 = lineSource.GetPoint1()
        p2 = lineSource.GetPoint2()
        # Linear interpolation between the ray end points.
        part = Add(p1, Multiply(Subtract(p2, p1), position))
        self.sphereSource.SetCenter(part[0], part[1], part[2])
        self.assemblyFollower.SetPosition(part[0], part[1], part[2])
        self.widget.render()

    def camPosition(self):
        """Return the active camera position (world coordinates)."""
        return self.widget.renderer.GetActiveCamera().GetPosition()

    def mouseMove(self, iren, event=""):
        """
        vtk action callback

        While a ray line exists, slide the locator to the point on the ray
        closest to the mouse. With Shift held, snap to the nearby sample with
        the largest intensity difference (weighted towards the mouse).
        """
        if not self.lineActor:
            # TODO: show crosshair or some other thing instead of cursor
            self.widget.rwi.ShowCursor()
            return
        x, y = iren.GetEventPosition()
        q1, q2 = rayForMouse(self.widget.renderer, x, y)
        lineSource = self.lineActor.GetMapper().GetInputConnection(0, 0).GetProducer()
        p1 = lineSource.GetPoint1()  # Volume entry point
        p2 = lineSource.GetPoint2()  # Volume exit point
        # location is the closest point on the drawn line
        location, other = ClosestPoints(p1, p2, q1, q2, clamp=True)
        lengthToLocation = Length(Subtract(location, p1))
        lengthRay = Length(Subtract(p2, p1))
        # Ratio in [0, 1] of the locator along the ray.
        locationRatio = lengthToLocation / lengthRay
        # If shift key is pushed in, try to snap to logical points along the ray
        if iren.GetShiftKey() != 0:
            # Get the index of the sample that is closest to the point on the ray
            sampleIndex = int(len(self.sampleDiffs) * locationRatio)
            # Sample size is the amount of samples before and after the sample index
            # that are going to be analyzed
            sampleSize = 10
            # Calculate the lower and upper bound index
            lowerBoundIndex = max(0, sampleIndex-sampleSize)
            upperBoundIndex = min(len(self.sampleDiffs), sampleIndex+sampleSize)
            samples = self.sampleDiffs[lowerBoundIndex:upperBoundIndex]
            # Create a penalty for the local samples that gives penalties to samples
            # that lay further away from the mouse
            penalty = [(sampleSize + 1) / float(1 + abs(i-sampleSize)) for i in range(2*sampleSize+1)]
            # offset aligns the (possibly clipped) sample window with the
            # symmetric penalty window.
            offset = lowerBoundIndex - (sampleIndex - sampleSize)
            resamples = []
            for i in range(len(samples)):
                resamples.append(samples[i] * penalty[i+offset])
            maxIndex = resamples.index(max(resamples))
            # lowerBoundIndex is the index of where the sampling starts
            # maxIndex is number that counts from the lowerBoundIndex
            locationIndex = lowerBoundIndex + maxIndex
            locationRatio = locationIndex / float(len(self.sampleDiffs))
            location = Add(p1, Multiply(Subtract(p2, p1), locationRatio))
        if not self.sphereSource:
            # Lazily create the locator sphere, scaled to the volume size.
            bounds = self.widget.imageData.GetBounds()
            sizes = [bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]]
            smallest = min(sizes)
            scale = smallest / 30
            self.sphereSource = CreateSphere(scale, [0.2, 1, 0.5])
            self._addToRender(self.sphereSource)
            self._createLocator()
        self.sphereSource.SetCenter(location[0], location[1], location[2])
        self.assemblyFollower.SetPosition(location[0], location[1], location[2])
        self.locatorUpdated.emit(locationRatio)
        self.widget.render()

    def keyPress(self, iren, event=""):
        """
        vtk action callback

        'a' or space: first press casts the ray and draws the line, second
        press commits the locator position as the picked point.
        """
        key = iren.GetKeyCode()
        if key != "a" and key != " ":
            # if key == " ":
            # 	print "Pressed space"
            return
        x, y = iren.GetEventPosition()
        # p1 and p2 are in world coordination
        p1, p2 = rayForMouse(self.widget.renderer, x, y)
        camPos = self.camPosition()
        q1, q2 = sortedLocations(p1, p2, camPos)
        if not self.lineActor:
            self._setLine(q1, q2)
        else:
            self._pickPosition()

    def _pickPosition(self):
        """Emit the locator's position (converted to local volume coordinates)
        and clear all picking props."""
        # point in world coordinates
        point = list(self.sphereSource.GetCenter())
        transform = TransformWithMatrix(self.widget.volume.GetMatrix())
        transform.Inverse()
        # transformedPoint in local coordinates
        tranformedPoint = transform.TransformPoint(point)
        point = list(tranformedPoint)
        self.cleanUpProps()
        self.pickedLocation.emit(point)
        self.widget.render()

    def pickedPosition(self):
        """
        Position is float between 0 and 1
        """
        self._pickPosition()

    def _setLine(self, point1, point2):
        """
        Input points should be world coordinates.

        Clip the ray to the volume bounds, draw it (solid in the main
        renderer, stippled in the overlay), then sample the volume along it
        to build the intensity profile used by the histogram and snapping.
        """
        bounds = list(self.widget.imageData.GetBounds())
        transform = TransformWithMatrix(self.widget.volume.GetMatrix())
        intersections = intersectionsWithBounds(bounds, transform, point1, point2)
        if not intersections:
            # Ray misses the volume entirely.
            return
        sortedIntersections = sortedLocations(intersections[0], intersections[1], self.camPosition())
        # Draw line in renderer and in overlay renderer in world coordinates
        self.lineActor = CreateLine(sortedIntersections[0], sortedIntersections[1])
        self._addToRender(self.lineActor)
        self.lineActorOverlay = CreateLine(sortedIntersections[0], sortedIntersections[1])
        self.lineActorOverlay.GetProperty().SetColor(1.0, 1.0, 1.0)
        self.lineActorOverlay.GetProperty().SetOpacity(0.5)
        self.lineActorOverlay.GetProperty().SetLineStipplePattern(0xf0f0)
        self.lineActorOverlay.GetProperty().SetLineStippleRepeatFactor(1)
        self._addToOverlay(self.lineActorOverlay)
        self.widget.render()
        # Sample volume for ray profile: should be done in local coordinates, so the intersections
        # have to be transformed again
        transform.Inverse()
        localIntersects = map(lambda x: list(transform.TransformPoint(x[0], x[1], x[2])), sortedIntersections)
        # ab is vector pointing from localIntersects[0] to localIntersects[1]
        ab = Subtract(localIntersects[1], localIntersects[0])
        abLength = Length(ab)
        nrOfSteps = 256  # TODO: make this number dependent on data size and length of vector
        stepLength = abLength / float(nrOfSteps)
        abNorm = Normalize(ab)
        abStep = Multiply(abNorm, stepLength)
        sampleLoc = localIntersects[0]
        interpolator = vtkImageInterpolator()
        interpolator.Initialize(self.widget.imageData)
        self.samples = []
        for i in range(nrOfSteps + 1):
            # Get sample from volume
            self.samples.append(interpolator.Interpolate(sampleLoc[0], sampleLoc[1], sampleLoc[2], 0))
            # Update the sampling position
            sampleLoc = Add(sampleLoc, abStep)
        self.propertiesWidget.setSamples(self.samples, self.widget.imageData.GetScalarRange())
        self._analyzeSamples(self.samples)

    def _addToRender(self, prop):
        """Add prop to the main renderer and remember it for cleanup."""
        self.widget.renderer.AddViewProp(prop)
        self.props.append(prop)

    def _addToOverlay(self, prop):
        """Add prop to the overlay renderer and remember it for cleanup."""
        self.widget.rendererOverlay.AddViewProp(prop)
        self.overlayProps.append(prop)

    def _createLocator(self):
        """Build the camera-facing crosshair (four line segments around a gap)
        that follows the locator sphere, scaled to the volume size."""
        bounds = self.widget.imageData.GetBounds()
        sizes = [bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4]]
        smallest = min(sizes)
        multiplier = smallest / 30
        halfSize = 4 * multiplier
        gapSize = 2 * multiplier
        upLine = CreateLine([0, gapSize, 0], [0, gapSize+halfSize, 0])
        downLine = CreateLine([0, -gapSize, 0], [0, -(gapSize+halfSize), 0])
        rightLine = CreateLine([gapSize, 0, 0], [gapSize+halfSize, 0, 0])
        leftLine = CreateLine([-gapSize, 0, 0], [-(gapSize+halfSize), 0, 0])
        assembly = vtkAssembly()
        assembly.AddPart(upLine)
        assembly.AddPart(downLine)
        assembly.AddPart(leftLine)
        assembly.AddPart(rightLine)
        # The follower keeps the crosshair oriented towards the camera.
        self.assemblyFollower = vtkProp3DFollower()
        self.assemblyFollower.SetProp3D(assembly)
        self.assemblyFollower.SetCamera(self.widget.renderer.GetActiveCamera())
        self._addToOverlay(self.assemblyFollower)
        self.widget.render()

    def _analyzeSamples(self, samples):
        """Precompute absolute differences between successive samples;
        large differences mark edges the Shift-snap prefers."""
        self.sampleDiffs = []
        for index in range(len(samples)-1):
            sample = samples[index]
            nextSample = samples[index+1]
            self.sampleDiffs.append(abs(nextSample - sample))
def rayForMouse(renderer, selectionX, selectionY):
    """
    Code taken from vtkPicker::Pick()
    Returns two points in world coordination.

    The returned pair is the mouse ray clipped to the camera's near and far
    clipping planes.
    """
    # Camera focal point and position in homogeneous coordinates; the focal
    # point's display-space depth supplies the z-buffer value for the pick.
    camera = renderer.GetActiveCamera()
    camPosition = list(camera.GetPosition()) + [1.0]
    cameraFP = list(camera.GetFocalPoint()) + [1.0]
    renderer.SetWorldPoint(cameraFP[0], cameraFP[1], cameraFP[2], cameraFP[3])
    renderer.WorldToDisplay()
    selectionZ = renderer.GetDisplayPoint()[2]

    # Project the selection point back into world coordinates.
    renderer.SetDisplayPoint(selectionX, selectionY, selectionZ)
    renderer.DisplayToWorld()
    worldCoords = renderer.GetWorldPoint()
    pickPosition = [worldCoords[index] / worldCoords[3] for index in range(3)]

    # The ray runs from the camera through the selection point; clip it to
    # the front and back clipping planes along the direction of projection.
    ray = [pickPosition[index] - camPosition[index] for index in range(3)]
    cameraDOP = [cameraFP[index] - camPosition[index] for index in range(3)]
    vtkMath.Normalize(cameraDOP)
    rayLength = vtkMath.Dot(cameraDOP, ray)
    clipRange = camera.GetClippingRange()
    tF = clipRange[0] / rayLength
    tB = clipRange[1] / rayLength
    p1World = [camPosition[index] + tF * ray[index] for index in range(3)]
    p2World = [camPosition[index] + tB * ray[index] for index in range(3)]
    # TODO: clip the line just outside the volume
    return p1World, p2World
def sortedLocations(p1, p2, camPos):
    """Return (p1, p2) ordered by increasing distance from the camera position."""
    distanceToP1 = Length(Subtract(p1, camPos))
    distanceToP2 = Length(Subtract(p2, camPos))
    if distanceToP1 < distanceToP2:
        return p1, p2
    return p2, p1
def intersectionsWithBounds(bounds, transform, point1, point2):
    """Intersect the segment point1-point2 with the (transformed) bounding box.

    The box is triangulated into 12 triangles; the segment is expected to hit
    the box in exactly 0 or 2 points. Returns the two intersection points
    (world coordinates) or None when the segment misses the box.
    """
    xmin, xmax, ymin, ymax, zmin, zmax = bounds[0], bounds[1], bounds[2], bounds[3], bounds[4], bounds[5]
    # The eight corners of the bounding box (same ordering as before).
    corners = [
        [xmin, ymin, zmin],   # 0
        [xmax, ymin, zmin],   # 1
        [xmin, ymax, zmin],   # 2
        [xmin, ymin, zmax],   # 3
        [xmax, ymax, zmin],   # 4
        [xmin, ymax, zmax],   # 5
        [xmax, ymin, zmax],   # 6
        [xmax, ymax, zmax],   # 7
    ]
    # Corners in world coordinates.
    tp = [list(transform.TransformPoint(c[0], c[1], c[2])) for c in corners]
    # Two triangles per face of the box, by corner index.
    faceTriangles = [
        (0, 1, 4), (0, 2, 4),
        (0, 2, 5), (0, 3, 5),
        (0, 1, 6), (0, 3, 6),
        (7, 6, 3), (7, 5, 3),
        (7, 5, 2), (7, 4, 2),
        (7, 6, 1), (7, 4, 1),
    ]
    intersections = []
    for a, b, c in faceTriangles:
        hit = LineIntersectionWithTriangle(point1, point2, [tp[a], tp[b], tp[c]])
        if hit[0]:
            intersections.append(hit[1])
    assert len(intersections) == 2 or len(intersections) == 0
    if len(intersections) == 2:
        return intersections
    return None
| mit |
pycket/pycket | pycket/util.py | 2 | 11486 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import inspect
import string
from rpython.rlib import streamio as sio
from rpython.rlib.listsort import make_timsort_class
from rpython.rlib import jit, objectmodel, rtime
from rpython.rlib.unroll import unrolling_iterable
def os_check_env_var(var_str):
    """Return True when var_str is set in Pycket's global environment table."""
    from pycket.env import w_global_config as config
    return config.env_var_exists(var_str)
def os_get_env_var(var_str):
    """Return the value of var_str from Pycket's global environment table."""
    from pycket.env import w_global_config as config
    return config.get_env_var(var_str)
##############################################
# Performance Region
##############################################
## this code is a port of cs/linklet/performance.ss
class PerfRegion(object):
    """Context manager that brackets a block between start_perf_region and
    finish_perf_region calls for the given label."""

    def __init__(self, l):
        self.label = l

    def __enter__(self):
        start_perf_region(self.label)

    def __exit__(self, exc_type, exc_value, traceback):
        finish_perf_region(self.label)
class PerfRegionCPS(PerfRegion):
    # Variant of PerfRegion for continuation-passing-style functions: on a
    # normal exit the region is deliberately left open, because the matching
    # finish_perf_region call lives in the continuation.
    def __exit__(self,a,b,c):
        if a is None:
            # normal return, except that this is a CPS function and so the
            # finish_perf_region call is in the continuation
            # If `with` gave us access to the return value we could do this
            # automatically here, but it doesn't
            pass
        else:
            # exception, so we have to call finish_perf_region
            finish_perf_region(self.label)
            # re-raise the exception
            return None # using False here confuses rtyper
def start_perf_region(label):
    """Open a timing scope: push the current wall-clock and GC times.

    No-op unless the PLT_LINKLET_TIMES environment variable is set.
    The `label` argument is only used by the matching finish_perf_region call.
    """
    from pycket.prims.general import current_gc_time
    if os_check_env_var("PLT_LINKLET_TIMES"):
        linklet_perf.current_start_time.append(rtime.time())
        linklet_perf.current_gc_start_time.append(current_gc_time())
def finish_perf_region(label):
    """Close the innermost timing scope and record its duration under `label`.

    No-op unless the PLT_LINKLET_TIMES environment variable is set. After
    recording, every still-open enclosing scope has its start times shifted
    forward by this region's duration, so parents report exclusive time
    (time spent in nested regions is not double-counted).
    """
    from pycket.prims.general import current_gc_time
    if os_check_env_var("PLT_LINKLET_TIMES"):
        assert (len(linklet_perf.current_start_time) > 0)
        delta = rtime.time() - linklet_perf.current_start_time[-1]
        delta_gc = current_gc_time() - linklet_perf.current_gc_start_time[-1]
        table_add(linklet_perf.region_times, label, delta)
        table_add(linklet_perf.region_gc_times, label, delta_gc)
        table_add(linklet_perf.region_counts, label, 1)
        linklet_perf.current_start_time.pop()
        linklet_perf.current_gc_start_time.pop()
        # Exclude this region's time from all enclosing regions.
        for i in range(len(linklet_perf.current_start_time)):
            linklet_perf.current_start_time[i] += delta
            linklet_perf.current_gc_start_time[i] += delta_gc
class LinkletPerf(object):
    """Accumulates per-region wall-clock/GC timings and prints the report
    that the PLT_LINKLET_TIMES environment variable enables (a port of
    cs/linklet/performance.ss)."""
    def __init__(self):
        self.region_times = {}      # label -> accumulated seconds
        self.region_gc_times = {}   # label -> accumulated GC ms
        self.region_counts = {}     # label -> number of times the region ran
        self.current_start_time = []     # stack of open region start times
        self.current_gc_start_time = []  # stack of open region GC start times
        self.name_len = 0
        self.total = 0
        self.total_gc = 0
        self.total_len = 0
        self.total_gc_len = 0
        self.region_subs = {}       # category -> {label -> seconds} (filled by sum_values)
        self.region_gc_subs = {}
        # Region labels grouped into report categories.
        # BUGFIX: "instantiate-linklet" and "outer" used to be fused into the
        # bogus label "instantiate-linkletouter" by a missing comma, so the
        # "run" category never matched any recorded region.
        self.categories = {"read" : ["fasl->s-exp", "s-exp->ast", "assign-convert-deserialize"],
                           "run" : ["instantiate-linklet", "outer"],
                           "startup" : ["expander-linklet", "json-load", "json-to-ast",
                                        "fasl-linklet", "set-params"],
                           "compile" : ["compile-linklet", "compile-sexp-to-ast",
                                        "compile-normalize", "compile-assign-convert",
                                        ]}

    def init(self):
        """Record interpreter boot time as the first region."""
        self.region_times["boot"] = rtime.clock()
        self.region_gc_times["boot"] = 0

    def report_time(self, level, label, n, gc_ht):
        """Print one region line: time, GC time and invocation count."""
        counts = self.region_counts.get(label,0)
        assert not(isinstance(counts,str))
        if counts == 0:
            c = ""
        else:
            c = " ; %d times"%int(counts)
        self.report(level, label, n,
                    " [%s]"%(pad_left(str(int(gc_ht.get(label, 0))), self.total_gc_len)),
                    "ms", c)

    def report(self, level, label, n, nextra, units, extra):
        """Print one aligned report line, indented by `level`."""
        lprintf(";; %s%s%s %s%s %s%s\n",
                (spaces(level*2),
                 pad_right(label,self.name_len),
                 spaces((3-level) * 2),
                 pad_left(str(n),self.total_len),
                 nextra,
                 units,
                 extra))

    def loop(self, ht, gc_ht, level):
        """Recursively report a region table and its category sub-tables."""
        for label in ht: # Fixme I can't sort
            self.report_time(level, label, int(1000*ht[label]), gc_ht)
            sub_ht = self.region_subs.get(label, None)
            sub_gc_ht = self.region_gc_subs.get(label, None)
            if sub_ht:
                self.loop(sub_ht, sub_gc_ht, level+1)

    def print_report(self):
        """Emit the full timing report (no-op unless PLT_LINKLET_TIMES is set)."""
        if os_check_env_var("PLT_LINKLET_TIMES"):
            total = 0
            total_gc = 0
            self.name_len = 0
            for k in self.region_times:
                self.name_len = max(self.name_len,len(k))
                total += self.region_times[k]
                total_gc += self.region_gc_times[k]
            self.total = int(1000*total)
            self.total_gc = int(total_gc)
            self.total_len = len(str(total))
            self.total_gc_len = len(str(self.total_gc))
            # Fold the individual labels of each category into one summary row.
            for cat in self.categories:
                t = sum_values(self.region_times, self.categories[cat], cat, self.region_subs)
                t_gc = sum_values(self.region_gc_times, self.categories[cat], cat, self.region_gc_subs)
                if not(0 == t) and not(0 == t_gc):
                    self.region_times[cat] = t
                    self.region_gc_times[cat] = t_gc
            self.loop(self.region_times, self.region_gc_times, 0)
            self.report(0, "total", self.total, " [%s]"%self.total_gc, "ms", "")

# Module-level singleton used by start/finish_perf_region.
linklet_perf = LinkletPerf()
def second(l, i):
    """getitem callback for the timsort below: the second component of the
    pair stored at index i of l."""
    _, value = l[i]
    return value
# Timsort specialized to sort lists of pairs by their second component.
Sorter = make_timsort_class(getitem=second)

def ht_to_sorted_list(ht):
    """Return the (key, value) pairs of dict `ht` as a list sorted by value.

    BUGFIX: this previously iterated `enumerate(ht)`, which yields
    (index, key) pairs — not the dict's (key, value) items — so the result
    was a meaningless list of indices paired with keys.
    """
    l = []
    for k, v in ht.items():
        l.append((k, v))
    s = Sorter(l)
    s.sort()
    return l
def table_add(t, l, v):
    """Accumulate v into t[l], treating a missing key as 0."""
    t[l] = t.get(l, 0) + v
def lprintf(fmt, args):
    """printf-style logging: write fmt % args to stderr (fd 2), line-buffered.

    `args` is a tuple of values for the %-format string.
    (Removed an unused `w_main_logger` import that was never referenced.)
    """
    sio.fdopen_as_stream(2, "w", buffering=1).write(fmt%args)
    return
def spaces(n):
    """Return a string consisting of n spaces."""
    return n * " "
def pad_left(v, w):
    """Right-align string v in a field of width w by prefixing spaces.
    Strings already wider than w are returned unchanged."""
    padding = max(0, w - len(v))
    return " " * padding + v
def pad_right(v, w):
    """Left-align v (converted to str) in a field of width w by appending
    spaces. Values already wider than w are returned unchanged."""
    s = str(v)
    return s + " " * max(0, w - len(s))
def sum_values(ht, keys, key, subs):
    """Move the entries of `ht` named by `keys` into a fresh sub-table stored
    at subs[key], and return the sum of their values (missing keys count 0)."""
    collected = {}
    subs[key] = collected
    total = 0
    for name in keys:
        value = ht.get(name, 0)
        total += value
        collected[name] = value
        if name in ht:
            del ht[name]
    return total
##############################################
# Debug Outputs
##############################################
def active_break():
    """Drop into pdb, but only while Pycket's global debug mode is active."""
    from pycket.env import w_global_config as glob
    if glob.is_debug_active():
        import pdb;pdb.set_trace()
def active_log(print_str, given_verbosity_level=0, debug=False, keyword=""):
    """Forward to console_log, but only while global debug mode is active."""
    from pycket.env import w_global_config as glob
    if not glob.is_debug_active():
        return
    console_log(print_str, given_verbosity_level, debug, keyword)
def console_log_after_boot(print_str, given_verbosity_level=0, debug=False, keyword=""):
    """Forward to console_log, but only once the interpreter boot has completed."""
    from pycket.env import w_global_config as glob
    if not glob.is_boot_completed():
        return
    console_log(print_str, given_verbosity_level, debug, keyword)
def console_log(print_str, given_verbosity_level=0, debug=False, keyword=""):
    """Timestamped, filtered console logging.

    A message is printed when any of the following holds:

    * `given_verbosity_level` is at or below the numeric level the user
      supplied with ``--verbose N`` (lower-level logs are included);
    * `debug` is True, which bypasses the verbosity check entirely so a
      specific log can be forced without any ``--verbose`` flag;
    * `keyword` matches a keyword supplied via ``--verbose <keyword>``.

    Numeric levels and keywords are not mutually exclusive; multiple
    ``--verbose`` flags may combine them, e.g.::

        pycket --verbose 2 --verbose regexp --verbose prims
    """
    from pycket.env import w_global_config as glob
    current_v_level = glob.get_config_val('verbose')
    wanted = (given_verbosity_level <= current_v_level
              or debug
              or glob.is_keyword_active(keyword))
    if not wanted:
        return
    # str() of rtime.time() keeps 1 or 2 fractional digits; pad to 2 so the
    # timestamps line up.
    timestamp = str(rtime.time())
    if len(timestamp.split(".")[1]) == 1:
        timestamp += "0"
    print("[%s] %s" % (timestamp, print_str))
##############################################
# MISC
##############################################
def snake_case(str):
    """Convert a CamelCase identifier to snake_case.

    Interior uppercase ASCII letters are prefixed with an underscore and
    lowered; the first and last characters are lowered without an
    underscore (so "CamelCase" -> "camel_case").

    BUGFIX: a one-character input used to be returned doubled
    (snake_case("A") gave "aa") because the single character served as both
    first and last character around an empty body. Also switched from the
    locale-dependent, Python-2-only `string.uppercase` to the equivalent
    `string.ascii_uppercase`.
    """
    if not str:
        return str
    if len(str) == 1:
        return str.lower()
    first = str[0]
    last = str[-1]
    new = []
    for b in str[1:-1]:
        if b in string.ascii_uppercase:
            new.append("_" + b.lower())
        else:
            new.append(b)
    return first.lower() + "".join(new) + last.lower()
def memoize(f):
    """Memoize f on its positional arguments — but only before translation.

    Once the RPython toolchain has translated the program,
    objectmodel.we_are_translated() is True and the wrapper calls f directly,
    so the cache exists purely as an untranslated-run optimization.
    """
    cache = {}

    def wrapper(*val):
        if objectmodel.we_are_translated():
            return f(*val)
        result = cache.get(val, None)
        if result is None:
            result = f(*val)
            cache[val] = result
        return result

    wrapper.__name__ = "Memoized(%s)" % f.__name__
    return wrapper
# Add a `make` method to a given class which memoizes constructor invocations.
def memoize_constructor(cls):
    """Class decorator: attach a memoizing `make` staticmethod that caches
    constructor invocations of cls."""
    cls.make = staticmethod(memoize(cls))
    return cls
def strip_immutable_field_name(str):
    """Drop every RPython '[*]' immutability marker from a field name."""
    return "".join(str.split("[*]"))
def add_copy_method(copy_method="copy"):
    """Class-decorator factory: install a shallow-copy method named
    `copy_method` that copies all fields listed in `_immutable_fields_`."""
    def wrapper(cls):
        """
        This attempts to produce a method which will copy the immutable contents of
        a given data type from the '_immutable_fields_' annotation of the class.
        The methods employed here will only work for certain types of class
        specifications (i.e. only works if all the desired fields are present in the
        '_immutable_fields_' annotation of the class definition).
        The mutable fields of the class must be copied separately as well.
        """
        # Collect the immutable field names declared anywhere in the MRO.
        field_names = []
        for base in inspect.getmro(cls):
            if base is object:
                continue
            fields = getattr(base, "_immutable_fields_", [])
            field_names.extend(map(strip_immutable_field_name, fields))
        # unrolling_iterable lets the RPython translator unroll the copy loop.
        field_names = unrolling_iterable(field_names)
        def copy(self):
            # instantiate() allocates without running __init__.
            result = objectmodel.instantiate(cls)
            for attr in field_names:
                val = getattr(self, attr)
                setattr(result, attr, val)
            return result
        setattr(cls, copy_method, copy)
        return cls
    return wrapper
| mit |
saullocastro/compmech | theory/panel/kpanels/kpanelt/kpanelt_fsdt_donnell_bc4/print_nonlinear_sparse.py | 3 | 7425 | import os
import glob
import numpy as np
import sympy
from sympy import pi, sin, cos, var, Matrix
# Symbolic setup for the cone/panel stiffness-matrix code generation.
# Fourier-series indices: i1/j1 index the current (row) shape function,
# k1/l1 the other (column) shape function.
var('i1, j1, k1, l1', integer=True)
# Geometry and coordinate symbols (axial x, circumferential t, cone angle).
var('x, t, xa, xb, tmin, tmax, L, r, sina, cosa')
# Laminate constitutive constants (ABD matrices plus transverse shear).
var('A11, A12, A16, A22, A26, A66, A44, A45, A55')
var('B11, B12, B16, B22, B26, B66')
var('D11, D12, D16, D22, D26, D66')
# Displacement-gradient, initial-imperfection and stress-resultant symbols.
var('wx, wt, w0x, w0t, Nxx, Ntt, Nxt')
# Placeholders for the 8x8 "p" and "q" factor matrices produced by Mathematica.
var('p00, p01, p02, p03, p04, p05, p06, p07')
var('p10, p11, p12, p13, p14, p15, p16, p17')
var('p20, p21, p22, p23, p24, p25, p26, p27')
var('p30, p31, p32, p33, p34, p35, p36, p37')
var('p40, p41, p42, p43, p44, p45, p46, p47')
var('p50, p51, p52, p53, p54, p55, p56, p57')
var('p60, p61, p62, p63, p64, p65, p66, p67')
var('p70, p71, p72, p73, p74, p75, p76, p77')
var('q00, q01, q02, q03, q04, q05, q06, q07')
var('q10, q11, q12, q13, q14, q15, q16, q17')
var('q20, q21, q22, q23, q24, q25, q26, q27')
var('q30, q31, q32, q33, q34, q35, q36, q37')
var('q40, q41, q42, q43, q44, q45, q46, q47')
var('q50, q51, q52, q53, q54, q55, q56, q57')
var('q60, q61, q62, q63, q64, q65, q66, q67')
var('q70, q71, q72, q73, q74, q75, q76, q77')
p = Matrix([[p00, p01, p02, p03, p04, p05, p06, p07],
            [p10, p11, p12, p13, p14, p15, p16, p17],
            [p20, p21, p22, p23, p24, p25, p26, p27],
            [p30, p31, p32, p33, p34, p35, p36, p37],
            [p40, p41, p42, p43, p44, p45, p46, p47],
            [p50, p51, p52, p53, p54, p55, p56, p57],
            [p60, p61, p62, p63, p64, p65, p66, p67],
            [p70, p71, p72, p73, p74, p75, p76, p77]])
# String names of the p entries, used when emitting the generated code.
pstr = np.array([str(i) for i in p]).reshape(8, 8)
q = Matrix([[q00, q01, q02, q03, q04, q05, q06, q07],
            [q10, q11, q12, q13, q14, q15, q16, q17],
            [q20, q21, q22, q23, q24, q25, q26, q27],
            [q30, q31, q32, q33, q34, q35, q36, q37],
            [q40, q41, q42, q43, q44, q45, q46, q47],
            [q50, q51, q52, q53, q54, q55, q56, q57],
            [q60, q61, q62, q63, q64, q65, q66, q67],
            [q70, q71, q72, q73, q74, q75, q76, q77]])
qstr = np.array([str(i) for i in q]).reshape(8, 8)
# Short names that will stand in for the trigonometric shape functions in the
# generated code (the "_bt" variants use the sign-flipped denominator).
var('sinj1_bt, cosj1_bt')
var('cosl1_bt')
var('sini1bx, cosi1bx')
var('sinj1bt, cosj1bt')
var('sink1bx, cosk1bx')
var('sinl1bt, cosl1bt')
# Substitution table: replace each full trig expression with its short name.
subs = {
    sin(pi*j1*(t-tmin)/(tmax-tmin)): sinj1bt,
    cos(pi*j1*(t-tmin)/(tmax-tmin)): cosj1bt,
    sin(pi*l1*(t-tmin)/(tmax-tmin)): sinl1bt,
    cos(pi*l1*(t-tmin)/(tmax-tmin)): cosl1bt,
    sin(pi*j1*(t-tmin)/(-tmax+tmin)): sinj1_bt,
    cos(pi*j1*(t-tmin)/(-tmax+tmin)): cosj1_bt,
    cos(pi*l1*(t-tmin)/(-tmax+tmin)): cosl1_bt,
    sin(pi*k1*(0.5*L+x)/L): sink1bx,
    cos(pi*k1*(0.5*L+x)/L): cosk1bx,
    sin(pi*i1*(0.5*L+x)/L): sini1bx,
    cos(pi*i1*(0.5*L+x)/L): cosi1bx,
}
def List(*e):
    """Mathematica-style List constructor: gather the arguments into a Python
    list, so that eval() of the exported Mathematica expressions works."""
    return [item for item in e]
# define the values for pij and qij
# print these values
# print the formulas for k0L etc for each case based on pij and qij
#
# Pass 1: for every exported p-/q-factor matrix, record which pij/qij entries
# are actually non-null (collected in `valid`, keyed by "<matrix>_<sub>") and
# write a text file containing the entry definitions plus buffer read/write
# snippets for the sparse code generator.
valid = {}
for i, sufix in enumerate(['p', 'q']):
    if sufix=='p':
        pqstr = pstr
        pq = p
    elif sufix=='q':
        pqstr = qstr
        pq = q
    for filepath in glob.glob(r'.\nonlinear_mathematica\fortran*cone*'+sufix+r'*.txt'):
        print filepath
        with open(filepath) as f:
            print_str = ''
            filename = os.path.basename(filepath)
            names = filename[:-4].split('_') # removing .txt
            # k0Lp, k0Lq, kGp, kGq, kLLp or kLLq
            matrix_name = names[2]
            sub_matrix = names[3][i] # 0, 1, or 2
            key = matrix_name[:-1] + '_' + names[3]
            if not key in valid.keys():
                valid[key] = set()
            # The Mathematica export is one expression spread over many
            # lines; rejoin it and normalize the function names for eval().
            lines = [line.strip() for line in f.readlines()]
            string = ''.join(lines)
            string = string.replace('Pi','pi')
            string = string.replace('Sin','sin')
            string = string.replace('Cos','cos')
            string = string.replace('\\','')
            tmp = eval(string)
            matrix = Matrix(tmp)
            # Section 1: plain "<entry> = <expression>" definitions.
            print_str += '{0}_{1}\n'.format(matrix_name, sub_matrix)
            for (row, col), value in np.ndenumerate(matrix):
                if value:
                    value = value.subs(subs)
                    valid[key].add(pq[row, col])
                    print_str += '{0} = {1}\n'.format(pqstr[row,col], value)
            # Section 2: statements that store each entry into a buffer.
            print_str += '\n# create buffer\n'
            print_str += '{0}_{1}\n'.format(matrix_name, sub_matrix)
            for (row,col), value in np.ndenumerate(matrix):
                if value:
                    value = value.subs(subs)
                    valid[key].add(pq[row, col])
                    print_str += '{0}_{1}_{2}[pos] = {2}\n'.format(
                            matrix_name, sub_matrix, pqstr[row,col])
            # Section 3: statements that read the entries back from the buffer.
            print_str += '\n# access buffer\n'
            for (row,col), value in np.ndenumerate(matrix):
                if value:
                    value = value.subs(subs)
                    valid[key].add(pq[row, col])
                    print_str += '{2} = {0}_{1}_{2}[pos]\n'.format(
                            matrix_name, sub_matrix, pqstr[row,col])
            # Append the substitution table for reference.
            print_str += '\nsubs\n\n'
            for k, value in subs.items():
                print_str += '{0} = {1}\n'.format(k, value)
            names[3] = sub_matrix
            filename = '_'.join(names) + '.txt'
            with open('.\\nonlinear_sparse\\' + filename, 'w') as f:
                f.write(print_str)
# Pass 2: for every exported k0L/kG/kLL sub-matrix, zero out the symbols that
# pass 1 did not mark as valid, then emit the sparse COO assembly snippet
# (rows[c], cols[c], out[c] updates) for each remaining non-null term.
l1 = glob.glob(r'.\nonlinear_mathematica\fortran*cone_k0L_*.txt')
l2 = glob.glob(r'.\nonlinear_mathematica\fortran*cone_kG_*.txt')
l3 = glob.glob(r'.\nonlinear_mathematica\fortran*cone_kLL_*.txt')
for filepath in (l1 + l2 + l3):
    print filepath
    with open(filepath) as f:
        print_str = ''
        filename = os.path.basename(filepath)
        names = filename[:-4].split('_') # removing .txt
        # k0L, kG or kLL
        matrix_name = names[2]
        sub_matrix = names[3] # 00, 01, 02, 11, 12 or 22
        left, right = sub_matrix
        key = matrix_name + '_' + sub_matrix
        lines = [line.strip() for line in f.readlines()]
        string = ''.join(lines)
        #string = string.replace('List','')
        string = string.replace('Pi','pi')
        string = string.replace('Sin','sin')
        string = string.replace('Cos','cos')
        string = string.replace('\\','')
        tmp = eval(string)
        matrix = Matrix(tmp)
        tmp = ''
        count = 0
        for (row, col), value in np.ndenumerate(matrix):
            # Drop any symbol that pass 1 did not record as valid for this
            # sub-matrix; entries that vanish entirely are skipped.
            for s in value.free_symbols:
                if not s in valid[key]:
                    value = value.subs({s:0})
            if value:
                count += 1
                tmp += 'c += 1\n'
                # A '0' block index means absolute row/col; otherwise the
                # indices are offsets from the current row/col base.
                if int(left)==0:
                    tmp += 'rows[c] = {}\n'.format(row)
                else:
                    tmp += 'rows[c] = row+{}\n'.format(row)
                if int(right)==0:
                    tmp += 'cols[c] = {}\n'.format(col)
                else:
                    tmp += 'cols[c] = col+{}\n'.format(col)
                # NOTE(review): only {1} (the value) is used here; the
                # matrix_name argument is passed but ignored — confirm intent.
                tmp += 'out[c] = beta*out[c] + alpha*({1})\n'.format(
                        matrix_name, value)
        print_str += '{0}_{1} with {2} non-null terms\n'.format(
                matrix_name, sub_matrix, count)
        print_str += tmp
        names[3] = sub_matrix
        filename = '_'.join(names) + '.txt'
        with open('.\\nonlinear_sparse\\' + filename, 'w') as f:
            f.write(print_str)
maxamillion/ansible | test/lib/ansible_test/_data/sanity/validate-modules/validate_modules/main.py | 13 | 105861 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <matt@sivel.net>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import abc
import argparse
import ast
import datetime
import json
import errno
import os
import re
import subprocess
import sys
import tempfile
import traceback
from collections import OrderedDict
from contextlib import contextmanager
from ansible.module_utils.compat.version import StrictVersion, LooseVersion
from fnmatch import fnmatch
import yaml
from ansible import __version__ as ansible_version
from ansible.executor.module_common import REPLACER_WINDOWS
from ansible.module_utils.common._collections_compat import Mapping
from ansible.module_utils.common.parameters import DEFAULT_TYPE_VALIDATORS
from ansible.plugins.loader import fragment_loader
from ansible.utils.collection_loader._collection_finder import _AnsibleCollectionFinder
from ansible.utils.plugin_docs import REJECTLIST, add_collection_to_versions_and_dates, add_fragments, get_docstring
from ansible.utils.version import SemanticVersion
from .module_args import AnsibleModuleImportError, AnsibleModuleNotInitialized, get_argument_spec
from .schema import ansible_module_kwargs_schema, doc_schema, return_schema
from .utils import CaptureStd, NoArgsAnsibleModule, compare_unordered_lists, is_empty, parse_yaml, parse_isodate
from voluptuous.humanize import humanize_error
from ansible.module_utils.six import PY3, with_metaclass, string_types
# Normalize Python 2/3 differences used throughout this file.
if PY3:
    # Because there is no ast.TryExcept in Python 3 ast module
    TRY_EXCEPT = ast.Try
    # REPLACER_WINDOWS from ansible.executor.module_common is byte
    # string but we need unicode for Python 3
    REPLACER_WINDOWS = REPLACER_WINDOWS.decode('utf-8')
else:
    TRY_EXCEPT = ast.TryExcept
# Directories that are never scanned for modules.
REJECTLIST_DIRS = frozenset(('.git', 'test', '.github', '.idea'))
INDENT_REGEX = re.compile(r'([\t]*)')
# Matches `type(...)` comparisons in conditions (unidiomatic-typecheck check).
TYPE_REGEX = re.compile(r'.*(if|or)(\s+[^"\']*|\s+)(?<!_)(?<!str\()type\([^)].*')
# Matches direct sys.exit() calls that are not inside a comment.
SYS_EXIT_REGEX = re.compile(r'[^#]*sys.exit\s*\(.*')
# Option names matching this look like they may carry secrets (no_log check);
# the negative lookahead keeps 'passive' from matching 'pass'.
NO_LOG_REGEX = re.compile(r'(?:pass(?!ive)|secret|token|key)', re.I)
# Imports modules must not use; 'new_only' restricts the error to new modules.
REJECTLIST_IMPORTS = {
    'requests': {
        'new_only': True,
        'error': {
            'code': 'use-module-utils-urls',
            'msg': ('requests import found, should use '
                    'ansible.module_utils.urls instead')
        }
    },
    r'boto(?:\.|$)': {
        'new_only': True,
        'error': {
            'code': 'use-boto3',
            'msg': 'boto import found, new modules should use boto3'
        }
    },
}
SUBPROCESS_REGEX = re.compile(r'subprocess\.Po.*')
OS_CALL_REGEX = re.compile(r'os\.call.*')
# X.Y.Z of the ansible-core controller running this test.
LOOSE_ANSIBLE_VERSION = LooseVersion('.'.join(ansible_version.split('.')[:3]))
def is_potential_secret_option(option_name):
    """Heuristically decide whether *option_name* may carry a secret.

    True only when the name matches NO_LOG_REGEX (pass/secret/token/key)
    and none of the known-benign suffixes or fragments below apply.
    """
    if NO_LOG_REGEX.search(option_name) is None:
        return False
    # If this is a count, type, algorithm, timeout, filename, or name, it is probably not a secret
    benign_suffixes = (
        '_count', '_type', '_alg', '_algorithm', '_timeout', '_name', '_comment',
        '_bits', '_id', '_identifier', '_period', '_file', '_filename',
    )
    if option_name.endswith(benign_suffixes):
        return False
    # 'key' also matches 'publickey', which is generally not secret
    benign_fragments = (
        'publickey', 'public_key', 'keyusage', 'key_usage', 'keyserver', 'key_server',
        'keysize', 'key_size', 'keyservice', 'key_service', 'pub_key', 'pubkey',
        'keyboard', 'secretary',
    )
    return not any(fragment in option_name for fragment in benign_fragments)
def compare_dates(d1, d2):
    """Return True when *d1* and *d2* denote the same date.

    Both values are parsed as ISO dates when possible; if either fails to
    parse, fall back to a plain equality comparison of the raw values.
    """
    try:
        return parse_isodate(d1, allow_date=True) == parse_isodate(d2, allow_date=True)
    except ValueError:
        # At least one value is not a parsable date; compare verbatim.
        return d1 == d2
class ReporterEncoder(json.JSONEncoder):
    """JSON encoder that serializes Exception instances as their string form."""

    def default(self, o):
        # Exceptions (stored in traces/messages) become plain strings;
        # anything else is deferred to the base class, which raises TypeError.
        if isinstance(o, Exception):
            return str(o)
        return super(ReporterEncoder, self).default(o)
class Reporter:
    """Collects per-file validation findings and renders them.

    Results live in ``self.files``, an OrderedDict keyed by file path; each
    value holds ``errors``, ``warnings``, ``traces`` and ``warning_traces``
    lists.
    """

    def __init__(self):
        self.files = OrderedDict()

    def _ensure_default_entry(self, path):
        # Lazily create the result skeleton for a path on first use.
        if path not in self.files:
            self.files[path] = {
                'errors': [],
                'warnings': [],
                'traces': [],
                'warning_traces': []
            }

    def _log(self, path, code, msg, level='error', line=0, column=0):
        """Record one finding of *level* ('error'/'warning') for *path*."""
        self._ensure_default_entry(path)
        lvl_dct = self.files[path]['%ss' % level]
        lvl_dct.append({
            'code': code,
            'msg': msg,
            'line': line,
            'column': column
        })

    def error(self, *args, **kwargs):
        self._log(*args, level='error', **kwargs)

    def warning(self, *args, **kwargs):
        self._log(*args, level='warning', **kwargs)

    def trace(self, path, tracebk):
        """Attach a traceback string to the error traces for *path*."""
        self._ensure_default_entry(path)
        self.files[path]['traces'].append(tracebk)

    def warning_trace(self, path, tracebk):
        """Attach a traceback string to the warning traces for *path*."""
        self._ensure_default_entry(path)
        self.files[path]['warning_traces'].append(tracebk)

    @staticmethod
    @contextmanager
    def _output_handle(output):
        """Yield a writable handle for *output*; '-' means stdout.

        Bug fix: the previous implementation unconditionally closed the
        handle, which closed ``sys.stdout`` when output was '-' and leaked
        the file handle if the caller raised.  Only handles opened here are
        closed now, and always via ``finally``.
        """
        if output != '-':
            handle = open(output, 'w+')
            try:
                yield handle
                handle.flush()
            finally:
                handle.close()
        else:
            yield sys.stdout
            sys.stdout.flush()

    @staticmethod
    def _filter_out_ok(reports):
        """Return only the entries of *reports* with errors or warnings."""
        temp_reports = OrderedDict()
        for path, report in reports.items():
            if report['errors'] or report['warnings']:
                temp_reports[path] = report
        return temp_reports

    def plain(self, warnings=False, output='-'):
        """Print out the test results in plain format

        output is ignored here for now
        """
        ret = []
        for path, report in Reporter._filter_out_ok(self.files).items():
            traces = report['traces'][:]
            if warnings and report['warnings']:
                traces.extend(report['warning_traces'])
            for trace in traces:
                print('TRACE:')
                print('\n '.join((' %s' % trace).splitlines()))
            for error in report['errors']:
                error['path'] = path
                print('%(path)s:%(line)d:%(column)d: E%(code)s %(msg)s' % error)
                ret.append(1)
            if warnings:
                for warning in report['warnings']:
                    warning['path'] = path
                    print('%(path)s:%(line)d:%(column)d: W%(code)s %(msg)s' % warning)
        # Exit status 3 distinguishes "findings present" from a clean run.
        return 3 if ret else 0

    def json(self, warnings=False, output='-'):
        """Print out the test results in json format

        warnings is not respected in this output
        """
        ret = [len(r['errors']) for r in self.files.values()]
        with Reporter._output_handle(output) as handle:
            print(json.dumps(Reporter._filter_out_ok(self.files), indent=4, cls=ReporterEncoder), file=handle)
        return 3 if sum(ret) else 0
class Validator(with_metaclass(abc.ABCMeta, object)):
    """Validator instances are intended to be run on a single object. if you
    are scanning multiple objects for problems, you'll want to have a separate
    Validator for each one."""
    def __init__(self, reporter=None):
        # Reporter that findings are recorded into; may be shared by callers.
        self.reporter = reporter
    @abc.abstractproperty
    def object_name(self):
        """Name of the object we validated"""
        pass
    @abc.abstractproperty
    def object_path(self):
        """Path of the object we validated"""
        pass
    @abc.abstractmethod
    def validate(self):
        """Run this method to generate the test results"""
        pass
class ModuleValidator(Validator):
    # File patterns/names that are never validated as modules.
    REJECTLIST_PATTERNS = ('.git*', '*.pyc', '*.pyo', '.*', '*.md', '*.rst', '*.txt')
    REJECTLIST_FILES = frozenset(('.git', '.gitignore', '.travis.yml',
                                  '.gitattributes', '.gitmodules', 'COPYING',
                                  '__init__.py', 'VERSION', 'test-docs.sh'))
    REJECTLIST = REJECTLIST_FILES.union(REJECTLIST['MODULE'])
    # PowerShell modules exempt from the sidecar python-docs-file check.
    PS_DOC_REJECTLIST = frozenset((
        'async_status.ps1',
        'slurp.ps1',
        'setup.ps1'
    ))
    # win_dsc is a dynamic arg spec, the docs won't ever match
    PS_ARG_VALIDATE_REJECTLIST = frozenset(('win_dsc.ps1', ))
    # The only `from __future__` imports modules are allowed to use.
    ACCEPTLIST_FUTURE_IMPORTS = frozenset(('absolute_import', 'division', 'print_function'))
    def __init__(self, path, analyze_arg_spec=False, collection=None, collection_version=None,
                 base_branch=None, git_cache=None, reporter=None, routing=None):
        """Prepare validation state for a single module file.

        :param path: path of the module file to validate
        :param analyze_arg_spec: also execute the module to introspect its argument_spec
        :param collection: path to the collection the module belongs to (None for ansible.builtin)
        :param collection_version: declared version of that collection, if any
        :param base_branch: git branch to diff against when detecting new modules
        :param git_cache: shared GitCache; a fresh one is created when omitted
        :param reporter: shared Reporter; a fresh one is created when omitted
        :param routing: parsed meta/runtime.yml contents for collections
        """
        super(ModuleValidator, self).__init__(reporter=reporter or Reporter())
        self.path = path
        self.basename = os.path.basename(self.path)
        self.name = os.path.splitext(self.basename)[0]
        self.analyze_arg_spec = analyze_arg_spec
        # ansible.builtin uses Loose/Strict version classes; collections use SemVer.
        self._Version = LooseVersion
        self._StrictVersion = StrictVersion
        self.collection = collection
        self.collection_name = 'ansible.builtin'
        if self.collection:
            self._Version = SemanticVersion
            self._StrictVersion = SemanticVersion
            collection_namespace_path, collection_name = os.path.split(self.collection)
            self.collection_name = '%s.%s' % (os.path.basename(collection_namespace_path), collection_name)
        self.routing = routing
        self.collection_version = None
        if collection_version is not None:
            self.collection_version_str = collection_version
            self.collection_version = SemanticVersion(collection_version)
        self.base_branch = base_branch
        self.git_cache = git_cache or GitCache()
        self._python_module_override = False
        with open(path) as f:
            self.text = f.read()
        self.length = len(self.text.splitlines())
        try:
            self.ast = ast.parse(self.text)
        except Exception:
            # Unparsable source; checks that need the AST are skipped later.
            self.ast = None
        if base_branch:
            self.base_module = self._get_base_file()
        else:
            self.base_module = None
def _create_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return LooseVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._Version(v)
def _create_strict_version(self, v, collection_name=None):
if not v:
raise ValueError('Empty string is not a valid version')
if collection_name == 'ansible.builtin':
return StrictVersion(v)
if collection_name is not None:
return SemanticVersion(v)
return self._StrictVersion(v)
    def __enter__(self):
        # Context-manager support; __exit__ removes the temp base-branch copy.
        return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.base_module:
return
try:
os.remove(self.base_module)
except Exception:
pass
    @property
    def object_name(self):
        # Module file name (basename), e.g. 'ping.py'; used in reports.
        return self.basename
    @property
    def object_path(self):
        # Path of the module as given to the constructor; used in reports.
        return self.path
    def _get_collection_meta(self):
        """Implement if we need this for version_added comparisons
        """
        # Intentionally a no-op placeholder for now.
        pass
def _python_module(self):
if self.path.endswith('.py') or self._python_module_override:
return True
return False
def _powershell_module(self):
if self.path.endswith('.ps1'):
return True
return False
    def _just_docs(self):
        """Module can contain just docs and from __future__ boilerplate
        """
        try:
            for child in self.ast.body:
                if not isinstance(child, ast.Assign):
                    # allowed from __future__ imports
                    if isinstance(child, ast.ImportFrom) and child.module == '__future__':
                        for future_import in child.names:
                            if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:
                                break
                        else:
                            # for-else: every imported name was acceptable,
                            # so this statement counts as allowed boilerplate.
                            continue
                    return False
            return True
        except AttributeError:
            # self.ast is None when the module failed to parse.
            return False
    def _get_base_branch_module_path(self):
        """List all paths within lib/ansible/modules to try and match a moved module"""
        # Returns None when the module has no recorded path on the base branch.
        return self.git_cache.base_module_paths.get(self.object_name)
    def _has_alias(self):
        """Return true if the module has any aliases."""
        # Aliases are symlinked module names discovered on the current HEAD.
        return self.object_name in self.git_cache.head_aliased_modules
def _get_base_file(self):
# In case of module moves, look for the original location
base_path = self._get_base_branch_module_path()
command = ['git', 'show', '%s:%s' % (self.base_branch, base_path or self.path)]
p = subprocess.Popen(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if int(p.returncode) != 0:
return None
t = tempfile.NamedTemporaryFile(delete=False)
t.write(stdout)
t.close()
return t.name
def _is_new_module(self):
if self._has_alias():
return False
return not self.object_name.startswith('_') and bool(self.base_branch) and not bool(self.base_module)
def _check_interpreter(self, powershell=False):
if powershell:
if not self.text.startswith('#!powershell\n'):
self.reporter.error(
path=self.object_path,
code='missing-powershell-interpreter',
msg='Interpreter line is not "#!powershell"'
)
return
if not self.text.startswith('#!/usr/bin/python'):
self.reporter.error(
path=self.object_path,
code='missing-python-interpreter',
msg='Interpreter line is not "#!/usr/bin/python"',
)
def _check_type_instead_of_isinstance(self, powershell=False):
if powershell:
return
for line_no, line in enumerate(self.text.splitlines()):
typekeyword = TYPE_REGEX.match(line)
if typekeyword:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='unidiomatic-typecheck',
msg=('Type comparison using type() found. '
'Use isinstance() instead'),
line=line_no + 1
)
def _check_for_sys_exit(self):
# Optimize out the happy path
if 'sys.exit' not in self.text:
return
for line_no, line in enumerate(self.text.splitlines()):
sys_exit_usage = SYS_EXIT_REGEX.match(line)
if sys_exit_usage:
# TODO: add column
self.reporter.error(
path=self.object_path,
code='use-fail-json-not-sys-exit',
msg='sys.exit() call found. Should be exit_json/fail_json',
line=line_no + 1
)
def _check_gpl3_header(self):
header = '\n'.join(self.text.split('\n')[:20])
if ('GNU General Public License' not in header or
('version 3' not in header and 'v3.0' not in header)):
self.reporter.error(
path=self.object_path,
code='missing-gplv3-license',
msg='GPLv3 license header not found in the first 20 lines of the module'
)
elif self._is_new_module():
if len([line for line in header
if 'GNU General Public License' in line]) > 1:
self.reporter.error(
path=self.object_path,
code='use-short-gplv3-license',
msg='Found old style GPLv3 license header: '
'https://docs.ansible.com/ansible/devel/dev_guide/developing_modules_documenting.html#copyright'
)
def _check_for_subprocess(self):
for child in self.ast.body:
if isinstance(child, ast.Import):
if child.names[0].name == 'subprocess':
for line_no, line in enumerate(self.text.splitlines()):
sp_match = SUBPROCESS_REGEX.search(line)
if sp_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-popen',
msg=('subprocess.Popen call found. Should be module.run_command'),
line=(line_no + 1),
column=(sp_match.span()[0] + 1)
)
def _check_for_os_call(self):
if 'os.call' in self.text:
for line_no, line in enumerate(self.text.splitlines()):
os_call_match = OS_CALL_REGEX.search(line)
if os_call_match:
self.reporter.error(
path=self.object_path,
code='use-run-command-not-os-call',
msg=('os.call() call found. Should be module.run_command'),
line=(line_no + 1),
column=(os_call_match.span()[0] + 1)
)
def _find_rejectlist_imports(self):
for child in self.ast.body:
names = []
if isinstance(child, ast.Import):
names.extend(child.names)
elif isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
names.extend(grandchild.names)
for name in names:
# TODO: Add line/col
for rejectlist_import, options in REJECTLIST_IMPORTS.items():
if re.search(rejectlist_import, name.name):
new_only = options['new_only']
if self._is_new_module() and new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
elif not new_only:
self.reporter.error(
path=self.object_path,
**options['error']
)
    def _find_module_utils(self):
        """Find ansible.module_utils imports; returns their line numbers.

        Also warns when the module never imports ansible.module_utils.basic
        and flags wildcard module_utils imports.
        """
        linenos = []
        found_basic = False
        for child in self.ast.body:
            if isinstance(child, (ast.Import, ast.ImportFrom)):
                names = []
                try:
                    # ast.Import has no .module attribute; handled below.
                    names.append(child.module)
                    if child.module.endswith('.basic'):
                        found_basic = True
                except AttributeError:
                    pass
                names.extend([n.name for n in child.names])
                if [n for n in names if n.startswith('ansible.module_utils')]:
                    linenos.append(child.lineno)
                    for name in child.names:
                        # Wildcard imports from module_utils are not allowed.
                        if ('module_utils' in getattr(child, 'module', '') and
                                isinstance(name, ast.alias) and
                                name.name == '*'):
                            msg = (
                                'module-utils-specific-import',
                                ('module_utils imports should import specific '
                                 'components, not "*"')
                            )
                            if self._is_new_module():
                                self.reporter.error(
                                    path=self.object_path,
                                    code=msg[0],
                                    msg=msg[1],
                                    line=child.lineno
                                )
                            else:
                                self.reporter.warning(
                                    path=self.object_path,
                                    code=msg[0],
                                    msg=msg[1],
                                    line=child.lineno
                                )
                        if (isinstance(name, ast.alias) and
                                name.name == 'basic'):
                            found_basic = True
        if not found_basic:
            self.reporter.warning(
                path=self.object_path,
                code='missing-module-utils-basic-import',
                msg='Did not find "ansible.module_utils.basic" import'
            )
        return linenos
def _get_first_callable(self):
linenos = []
for child in self.ast.body:
if isinstance(child, (ast.FunctionDef, ast.ClassDef)):
linenos.append(child.lineno)
return min(linenos)
def _find_has_import(self):
for child in self.ast.body:
found_try_except_import = False
found_has = False
if isinstance(child, TRY_EXCEPT):
bodies = child.body
for handler in child.handlers:
bodies.extend(handler.body)
for grandchild in bodies:
if isinstance(grandchild, ast.Import):
found_try_except_import = True
if isinstance(grandchild, ast.Assign):
for target in grandchild.targets:
if not isinstance(target, ast.Name):
continue
if target.id.lower().startswith('has_'):
found_has = True
if found_try_except_import and not found_has:
# TODO: Add line/col
self.reporter.warning(
path=self.object_path,
code='try-except-missing-has',
msg='Found Try/Except block without HAS_ assignment'
)
    def _ensure_imports_below_docs(self, doc_info, first_callable):
        """Check that all imports appear after the documentation variables
        (DOCUMENTATION/EXAMPLES/RETURN) and before the first callable.
        """
        try:
            min_doc_line = min(
                [doc_info[key]['lineno'] for key in doc_info if doc_info[key]['lineno']]
            )
        except ValueError:
            # We can't perform this validation, as there are no DOCs provided at all
            return
        max_doc_line = max(
            [doc_info[key]['end_lineno'] for key in doc_info if doc_info[key]['end_lineno']]
        )
        import_lines = []
        for child in self.ast.body:
            if isinstance(child, (ast.Import, ast.ImportFrom)):
                if isinstance(child, ast.ImportFrom) and child.module == '__future__':
                    # allowed from __future__ imports
                    for future_import in child.names:
                        if future_import.name not in self.ACCEPTLIST_FUTURE_IMPORTS:
                            self.reporter.error(
                                path=self.object_path,
                                code='illegal-future-imports',
                                msg=('Only the following from __future__ imports are allowed: %s'
                                     % ', '.join(self.ACCEPTLIST_FUTURE_IMPORTS)),
                                line=child.lineno
                            )
                            break
                    else:  # for-else. If we didn't find a problem and break out of the loop, then this is a legal import
                        continue
                import_lines.append(child.lineno)
                if child.lineno < min_doc_line:
                    self.reporter.error(
                        path=self.object_path,
                        code='import-before-documentation',
                        msg=('Import found before documentation variables. '
                             'All imports must appear below '
                             'DOCUMENTATION/EXAMPLES/RETURN.'),
                        line=child.lineno
                    )
                    break
            elif isinstance(child, TRY_EXCEPT):
                # NOTE(review): `bodies` aliases child.body, so the extend()
                # below mutates the parsed AST in place — confirm intended.
                bodies = child.body
                for handler in child.handlers:
                    bodies.extend(handler.body)
                for grandchild in bodies:
                    if isinstance(grandchild, (ast.Import, ast.ImportFrom)):
                        import_lines.append(grandchild.lineno)
                        if grandchild.lineno < min_doc_line:
                            self.reporter.error(
                                path=self.object_path,
                                code='import-before-documentation',
                                msg=('Import found before documentation '
                                     'variables. All imports must appear below '
                                     'DOCUMENTATION/EXAMPLES/RETURN.'),
                                line=child.lineno
                            )
                            break
        # Imports must sit between the last doc variable and the first callable.
        for import_line in import_lines:
            if not (max_doc_line < import_line < first_callable):
                msg = (
                    'import-placement',
                    ('Imports should be directly below DOCUMENTATION/EXAMPLES/'
                     'RETURN.')
                )
                if self._is_new_module():
                    self.reporter.error(
                        path=self.object_path,
                        code=msg[0],
                        msg=msg[1],
                        line=import_line
                    )
                else:
                    self.reporter.warning(
                        path=self.object_path,
                        code=msg[0],
                        msg=msg[1],
                        line=import_line
                    )
    def _validate_ps_replacers(self):
        """Validate PowerShell #Requires / #AnsibleRequires statements."""
        # loop all (for/else + error)
        # get module list for each
        # check "shape" of each module name
        module_requires = r'(?im)^#\s*requires\s+\-module(?:s?)\s*(Ansible\.ModuleUtils\..+)'
        csharp_requires = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*(Ansible\..+)'
        found_requires = False
        for req_stmt in re.finditer(module_requires, self.text):
            found_requires = True
            # this will bomb on dictionary format - "don't do that"
            module_list = [x.strip() for x in req_stmt.group(1).split(',')]
            if len(module_list) > 1:
                self.reporter.error(
                    path=self.object_path,
                    code='multiple-utils-per-requires',
                    msg='Ansible.ModuleUtils requirements do not support multiple modules per statement: "%s"' % req_stmt.group(0)
                )
                continue
            module_name = module_list[0]
            if module_name.lower().endswith('.psm1'):
                self.reporter.error(
                    path=self.object_path,
                    code='invalid-requires-extension',
                    msg='Module #Requires should not end in .psm1: "%s"' % module_name
                )
        for req_stmt in re.finditer(csharp_requires, self.text):
            found_requires = True
            # this will bomb on dictionary format - "don't do that"
            module_list = [x.strip() for x in req_stmt.group(1).split(',')]
            if len(module_list) > 1:
                self.reporter.error(
                    path=self.object_path,
                    code='multiple-csharp-utils-per-requires',
                    msg='Ansible C# util requirements do not support multiple utils per statement: "%s"' % req_stmt.group(0)
                )
                continue
            module_name = module_list[0]
            if module_name.lower().endswith('.cs'):
                self.reporter.error(
                    path=self.object_path,
                    code='illegal-extension-cs',
                    msg='Module #AnsibleRequires -CSharpUtil should not end in .cs: "%s"' % module_name
                )
        # also accept the legacy #POWERSHELL_COMMON replacer signal
        if not found_requires and REPLACER_WINDOWS not in self.text:
            self.reporter.error(
                path=self.object_path,
                code='missing-module-utils-import-csharp-requirements',
                msg='No Ansible.ModuleUtils or C# Ansible util requirements/imports found'
            )
def _find_ps_docs_py_file(self):
if self.object_name in self.PS_DOC_REJECTLIST:
return
py_path = self.path.replace('.ps1', '.py')
if not os.path.isfile(py_path):
self.reporter.error(
path=self.object_path,
code='missing-python-doc',
msg='Missing python documentation file'
)
return py_path
def _get_docs(self):
docs = {
'DOCUMENTATION': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'EXAMPLES': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
'RETURN': {
'value': None,
'lineno': 0,
'end_lineno': 0,
},
}
for child in self.ast.body:
if isinstance(child, ast.Assign):
for grandchild in child.targets:
if not isinstance(grandchild, ast.Name):
continue
if grandchild.id == 'DOCUMENTATION':
docs['DOCUMENTATION']['value'] = child.value.s
docs['DOCUMENTATION']['lineno'] = child.lineno
docs['DOCUMENTATION']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'EXAMPLES':
docs['EXAMPLES']['value'] = child.value.s
docs['EXAMPLES']['lineno'] = child.lineno
docs['EXAMPLES']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
elif grandchild.id == 'RETURN':
docs['RETURN']['value'] = child.value.s
docs['RETURN']['lineno'] = child.lineno
docs['RETURN']['end_lineno'] = (
child.lineno + len(child.value.s.splitlines())
)
return docs
    def _validate_docs_schema(self, doc, schema, name, error_code):
        """Run a voluptuous *schema* over *doc*, reporting each violation
        under *error_code* (or the error's own ansible_error_code).
        """
        # TODO: Add line/col
        errors = []
        try:
            schema(doc)
        except Exception as e:
            # voluptuous MultipleInvalid carries a list of sub-errors.
            for error in e.errors:
                error.data = doc
            errors.extend(e.errors)
        for error in errors:
            path = [str(p) for p in error.path]
            local_error_code = getattr(error, 'ansible_error_code', error_code)
            if isinstance(error.data, dict):
                error_message = humanize_error(error.data, error)
            else:
                error_message = error
            if path:
                combined_path = '%s.%s' % (name, '.'.join(path))
            else:
                combined_path = name
            self.reporter.error(
                path=self.object_path,
                code=local_error_code,
                msg='%s: %s' % (combined_path, error_message)
            )
    def _validate_docs(self):
        """Validate DOCUMENTATION/EXAMPLES/RETURN and deprecation markers.

        Returns (doc_info, doc): the raw doc-variable info from _get_docs()
        and the parsed DOCUMENTATION dict (None when absent/unparsable).
        """
        doc_info = self._get_docs()
        doc = None
        documentation_exists = False
        examples_exist = False
        returns_exist = False
        # We have three ways of marking deprecated/removed files. Have to check each one
        # individually and then make sure they all agree
        filename_deprecated_or_removed = False
        deprecated = False
        removed = False
        doc_deprecated = None  # doc legally might not exist
        routing_says_deprecated = False
        if self.object_name.startswith('_') and not os.path.islink(self.object_path):
            filename_deprecated_or_removed = True
        # We are testing a collection
        if self.routing:
            routing_deprecation = self.routing.get('plugin_routing', {}).get('modules', {}).get(self.name, {}).get('deprecation', {})
            if routing_deprecation:
                # meta/runtime.yml says this is deprecated
                routing_says_deprecated = True
                deprecated = True
        if not removed:
            if not bool(doc_info['DOCUMENTATION']['value']):
                self.reporter.error(
                    path=self.object_path,
                    code='missing-documentation',
                    msg='No DOCUMENTATION provided'
                )
            else:
                documentation_exists = True
                doc, errors, traces = parse_yaml(
                    doc_info['DOCUMENTATION']['value'],
                    doc_info['DOCUMENTATION']['lineno'],
                    self.name, 'DOCUMENTATION'
                )
                if doc:
                    add_collection_to_versions_and_dates(doc, self.collection_name, is_module=True)
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='documentation-syntax-error',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )
                # Only run the deeper schema checks on syntactically valid docs.
                if not errors and not traces:
                    missing_fragment = False
                    with CaptureStd():
                        try:
                            get_docstring(self.path, fragment_loader, verbose=True,
                                          collection_name=self.collection_name, is_module=True)
                        except AssertionError:
                            fragment = doc['extends_documentation_fragment']
                            self.reporter.error(
                                path=self.object_path,
                                code='missing-doc-fragment',
                                msg='DOCUMENTATION fragment missing: %s' % fragment
                            )
                            missing_fragment = True
                        except Exception as e:
                            self.reporter.trace(
                                path=self.object_path,
                                tracebk=traceback.format_exc()
                            )
                            self.reporter.error(
                                path=self.object_path,
                                code='documentation-error',
                                msg='Unknown DOCUMENTATION error, see TRACE: %s' % e
                            )
                    if not missing_fragment:
                        add_fragments(doc, self.object_path, fragment_loader=fragment_loader, is_module=True)
                    if 'options' in doc and doc['options'] is None:
                        self.reporter.error(
                            path=self.object_path,
                            code='invalid-documentation-options',
                            msg='DOCUMENTATION.options must be a dictionary/hash when used',
                        )
                    if 'deprecated' in doc and doc.get('deprecated'):
                        doc_deprecated = True
                        doc_deprecation = doc['deprecated']
                        documentation_collection = doc_deprecation.get('removed_from_collection')
                        if documentation_collection != self.collection_name:
                            self.reporter.error(
                                path=self.object_path,
                                code='deprecation-wrong-collection',
                                msg='"DOCUMENTATION.deprecation.removed_from_collection must be the current collection name: %r vs. %r' % (
                                    documentation_collection, self.collection_name)
                            )
                    else:
                        doc_deprecated = False
                    if os.path.islink(self.object_path):
                        # This module has an alias, which we can tell as it's a symlink
                        # Rather than checking for `module: $filename` we need to check against the true filename
                        self._validate_docs_schema(
                            doc,
                            doc_schema(
                                os.readlink(self.object_path).split('.')[0],
                                for_collection=bool(self.collection),
                                deprecated_module=deprecated,
                            ),
                            'DOCUMENTATION',
                            'invalid-documentation',
                        )
                    else:
                        # This is the normal case
                        self._validate_docs_schema(
                            doc,
                            doc_schema(
                                self.object_name.split('.')[0],
                                for_collection=bool(self.collection),
                                deprecated_module=deprecated,
                            ),
                            'DOCUMENTATION',
                            'invalid-documentation',
                        )
                    if not self.collection:
                        existing_doc = self._check_for_new_args(doc)
                        self._check_version_added(doc, existing_doc)
            if not bool(doc_info['EXAMPLES']['value']):
                self.reporter.error(
                    path=self.object_path,
                    code='missing-examples',
                    msg='No EXAMPLES provided'
                )
            else:
                _doc, errors, traces = parse_yaml(doc_info['EXAMPLES']['value'],
                                                  doc_info['EXAMPLES']['lineno'],
                                                  self.name, 'EXAMPLES', load_all=True,
                                                  ansible_loader=True)
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='invalid-examples',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )
            if not bool(doc_info['RETURN']['value']):
                # Missing RETURN is only an error for brand-new modules.
                if self._is_new_module():
                    self.reporter.error(
                        path=self.object_path,
                        code='missing-return',
                        msg='No RETURN provided'
                    )
                else:
                    self.reporter.warning(
                        path=self.object_path,
                        code='missing-return-legacy',
                        msg='No RETURN provided'
                    )
            else:
                data, errors, traces = parse_yaml(doc_info['RETURN']['value'],
                                                  doc_info['RETURN']['lineno'],
                                                  self.name, 'RETURN')
                if data:
                    add_collection_to_versions_and_dates(data, self.collection_name, is_module=True, return_docs=True)
                self._validate_docs_schema(data, return_schema(for_collection=bool(self.collection)),
                                           'RETURN', 'return-syntax-error')
                for error in errors:
                    self.reporter.error(
                        path=self.object_path,
                        code='return-syntax-error',
                        **error
                    )
                for trace in traces:
                    self.reporter.trace(
                        path=self.object_path,
                        tracebk=trace
                    )
        # Check for mismatched deprecation
        if not self.collection:
            mismatched_deprecation = True
            if not (filename_deprecated_or_removed or removed or deprecated or doc_deprecated):
                mismatched_deprecation = False
            else:
                if (filename_deprecated_or_removed and doc_deprecated):
                    mismatched_deprecation = False
                if (filename_deprecated_or_removed and removed and not (documentation_exists or examples_exist or returns_exist)):
                    mismatched_deprecation = False
            if mismatched_deprecation:
                self.reporter.error(
                    path=self.object_path,
                    code='deprecation-mismatch',
                    msg='Module deprecation/removed must agree in documentation, by prepending filename with'
                        ' "_", and setting DOCUMENTATION.deprecated for deprecation or by removing all'
                        ' documentation for removed'
                )
        else:
            # We are testing a collection
            if self.object_name.startswith('_'):
                self.reporter.error(
                    path=self.object_path,
                    code='collections-no-underscore-on-deprecation',
                    msg='Deprecated content in collections MUST NOT start with "_", update meta/runtime.yml instead',
                )
            if not (doc_deprecated == routing_says_deprecated):
                # DOCUMENTATION.deprecated and meta/runtime.yml disagree
                self.reporter.error(
                    path=self.object_path,
                    code='deprecation-mismatch',
                    msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree.'
                )
            elif routing_says_deprecated:
                # Both DOCUMENTATION.deprecated and meta/runtime.yml agree that the module is deprecated.
                # Make sure they give the same version or date.
                routing_date = routing_deprecation.get('removal_date')
                routing_version = routing_deprecation.get('removal_version')
                # The versions and dates in the module documentation are auto-tagged, so remove the tag
                # to make comparison possible and to avoid confusing the user.
                documentation_date = doc_deprecation.get('removed_at_date')
                documentation_version = doc_deprecation.get('removed_in')
                if not compare_dates(routing_date, documentation_date):
                    self.reporter.error(
                        path=self.object_path,
                        code='deprecation-mismatch',
                        msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal date: %r vs. %r' % (
                            routing_date, documentation_date)
                    )
                if routing_version != documentation_version:
                    self.reporter.error(
                        path=self.object_path,
                        code='deprecation-mismatch',
                        msg='"meta/runtime.yml" and DOCUMENTATION.deprecation do not agree on removal version: %r vs. %r' % (
                            routing_version, documentation_version)
                    )
        # In the future we should error if ANSIBLE_METADATA exists in a collection
        return doc_info, doc
    def _check_version_added(self, doc, existing_doc):
        """Validate DOCUMENTATION.version_added against the base branch and,
        for new modules, against the current ansible-core X.Y version.
        """
        version_added_raw = doc.get('version_added')
        try:
            collection_name = doc.get('version_added_collection')
            version_added = self._create_strict_version(
                str(version_added_raw or '0.0'),
                collection_name=collection_name)
        except ValueError as e:
            version_added = version_added_raw or '0.0'
            if self._is_new_module() or version_added != 'historical':
                # already reported during schema validation, except:
                if version_added == 'historical':
                    self.reporter.error(
                        path=self.object_path,
                        code='module-invalid-version-added',
                        msg='version_added is not a valid version number: %r. Error: %s' % (version_added, e)
                    )
                return
        # Existing modules must not change their recorded version_added.
        if existing_doc and str(version_added_raw) != str(existing_doc.get('version_added')):
            self.reporter.error(
                path=self.object_path,
                code='module-incorrect-version-added',
                msg='version_added should be %r. Currently %r' % (existing_doc.get('version_added'), version_added_raw)
            )
        if not self._is_new_module():
            return
        # New modules must use the current ansible-core X.Y as version_added.
        should_be = '.'.join(ansible_version.split('.')[:2])
        strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
        if (version_added < strict_ansible_version or
                strict_ansible_version < version_added):
            self.reporter.error(
                path=self.object_path,
                code='module-incorrect-version-added',
                msg='version_added should be %r. Currently %r' % (should_be, version_added_raw)
            )
def _validate_ansible_module_call(self, docs):
try:
spec, args, kwargs = get_argument_spec(self.path, self.collection)
except AnsibleModuleNotInitialized:
self.reporter.error(
path=self.object_path,
code='ansible-module-not-initialized',
msg="Execution of the module did not result in initialization of AnsibleModule",
)
return
except AnsibleModuleImportError as e:
self.reporter.error(
path=self.object_path,
code='import-error',
msg="Exception attempting to import module for argument_spec introspection, '%s'" % e
)
self.reporter.trace(
path=self.object_path,
tracebk=traceback.format_exc()
)
return
self._validate_docs_schema(kwargs, ansible_module_kwargs_schema(for_collection=bool(self.collection)),
'AnsibleModule', 'invalid-ansiblemodule-schema')
self._validate_argument_spec(docs, spec, kwargs)
def _validate_list_of_module_args(self, name, terms, spec, context):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)):
# This is already reported by schema checking
continue
bad_term = False
for term in check:
if not isinstance(term, string_types):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must contain strings in the lists or tuples; found value %r" % (term, )
self.reporter.error(
path=self.object_path,
code=name + '-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(check)) != len(check):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code=name + '-collision',
msg=msg,
)
if not set(check) <= set(spec):
msg = name
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(check).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code=name + '-unknown',
msg=msg,
)
def _validate_required_if(self, terms, spec, context, module):
if terms is None:
return
if not isinstance(terms, (list, tuple)):
# This is already reported by schema checking
return
for check in terms:
if not isinstance(check, (list, tuple)) or len(check) not in [3, 4]:
# This is already reported by schema checking
continue
if len(check) == 4 and not isinstance(check[3], bool):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have forth value omitted or of type bool; got %r" % (check[3], )
self.reporter.error(
path=self.object_path,
code='required_if-is_one_of-type',
msg=msg,
)
requirements = check[2]
if not isinstance(requirements, (list, tuple)):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have third value (requirements) being a list or tuple; got type %r" % (requirements, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
continue
bad_term = False
for term in requirements:
if not isinstance(term, string_types):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have only strings in third value (requirements); got %r" % (term, )
self.reporter.error(
path=self.object_path,
code='required_if-requirements-type',
msg=msg,
)
bad_term = True
if bad_term:
continue
if len(set(requirements)) != len(requirements):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms in requirements"
self.reporter.error(
path=self.object_path,
code='required_if-requirements-collision',
msg=msg,
)
if not set(requirements) <= set(spec):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms in requirements which are not part of argument_spec: %s" % ", ".join(sorted(set(requirements).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_if-requirements-unknown',
msg=msg,
)
key = check[0]
if key not in spec:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " must have its key %s in argument_spec" % key
self.reporter.error(
path=self.object_path,
code='required_if-unknown-key',
msg=msg,
)
continue
if key in requirements:
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains its key %s in requirements" % key
self.reporter.error(
path=self.object_path,
code='required_if-key-in-requirements',
msg=msg,
)
value = check[1]
if value is not None:
_type = spec[key].get('type', 'str')
if callable(_type):
_type_checker = _type
else:
_type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
try:
with CaptureStd():
dummy = _type_checker(value)
except (Exception, SystemExit):
msg = "required_if"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has value %r which does not fit to %s's parameter type %r" % (value, key, _type)
self.reporter.error(
path=self.object_path,
code='required_if-value-type',
msg=msg,
)
def _validate_required_by(self, terms, spec, context):
if terms is None:
return
if not isinstance(terms, Mapping):
# This is already reported by schema checking
return
for key, value in terms.items():
if isinstance(value, string_types):
value = [value]
if not isinstance(value, (list, tuple)):
# This is already reported by schema checking
continue
for term in value:
if not isinstance(term, string_types):
# This is already reported by schema checking
continue
if len(set(value)) != len(value) or key in value:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " has repeated terms"
self.reporter.error(
path=self.object_path,
code='required_by-collision',
msg=msg,
)
if not set(value) <= set(spec) or key not in spec:
msg = "required_by"
if context:
msg += " found in %s" % " -> ".join(context)
msg += " contains terms which are not part of argument_spec: %s" % ", ".join(sorted(set(value).difference(set(spec))))
self.reporter.error(
path=self.object_path,
code='required_by-unknown',
msg=msg,
)
    def _validate_argument_spec(self, docs, spec, kwargs, context=None, last_context_spec=None):
        """Cross-validate the module's argument_spec against its documentation.

        Recurses into suboptions: ``context`` is the option-name path down to
        the current level, and ``last_context_spec`` is the spec dict owning
        that level (so mutually_exclusive etc. are read per level).  Checks
        restricted names, no_log heuristics, deprecations, defaults, types,
        choices, elements, required flags and documented-vs-accepted options.
        """
        if not self.analyze_arg_spec:
            return
        if docs is None:
            docs = {}
        if context is None:
            context = []
        if last_context_spec is None:
            last_context_spec = kwargs
        try:
            if not context:
                # Only merge documentation fragments once, at the top level.
                add_fragments(docs, self.object_path, fragment_loader=fragment_loader, is_module=True)
        except Exception:
            # Cannot merge fragments
            return
        # Use this to access type checkers later
        module = NoArgsAnsibleModule({})
        # Validate the restriction options declared at this level.
        self._validate_list_of_module_args('mutually_exclusive', last_context_spec.get('mutually_exclusive'), spec, context)
        self._validate_list_of_module_args('required_together', last_context_spec.get('required_together'), spec, context)
        self._validate_list_of_module_args('required_one_of', last_context_spec.get('required_one_of'), spec, context)
        self._validate_required_if(last_context_spec.get('required_if'), spec, context, module)
        self._validate_required_by(last_context_spec.get('required_by'), spec, context)
        provider_args = set()
        args_from_argspec = set()
        deprecated_args_from_argspec = set()
        doc_options = docs.get('options', {})
        if doc_options is None:
            doc_options = {}
        for arg, data in spec.items():
            # Names reserved for internal use by AnsibleModule.
            restricted_argument_names = ('message', 'syslog_facility')
            if arg.lower() in restricted_argument_names:
                msg = "Argument '%s' in argument_spec " % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += "must not be one of %s as it is used " \
                       "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
                self.reporter.error(
                    path=self.object_path,
                    code='invalid-argument-name',
                    msg=msg,
                )
                continue
            if 'aliases' in data:
                for al in data['aliases']:
                    if al.lower() in restricted_argument_names:
                        msg = "Argument alias '%s' in argument_spec " % al
                        if context:
                            msg += " found in %s" % " -> ".join(context)
                        msg += "must not be one of %s as it is used " \
                               "internally by Ansible Core Engine" % (",".join(restricted_argument_names))
                        self.reporter.error(
                            path=self.object_path,
                            code='invalid-argument-name',
                            msg=msg,
                        )
                        continue
            # Could this a place where secrets are leaked?
            # If it is type: path we know it's not a secret key as it's a file path.
            # If it is type: bool it is more likely a flag indicating that something is secret, than an actual secret.
            if all((
                    data.get('no_log') is None, is_potential_secret_option(arg),
                    data.get('type') not in ("path", "bool"), data.get('choices') is None,
            )):
                msg = "Argument '%s' in argument_spec could be a secret, though doesn't have `no_log` set" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                self.reporter.error(
                    path=self.object_path,
                    code='no-log-needed',
                    msg=msg,
                )
            if not isinstance(data, dict):
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " must be a dictionary/hash when used"
                self.reporter.error(
                    path=self.object_path,
                    code='invalid-argument-spec',
                    msg=msg,
                )
                continue
            # Date-based deprecation of the argument itself.
            removed_at_date = data.get('removed_at_date', None)
            if removed_at_date is not None:
                try:
                    if parse_isodate(removed_at_date, allow_date=False) < datetime.date.today():
                        msg = "Argument '%s' in argument_spec" % arg
                        if context:
                            msg += " found in %s" % " -> ".join(context)
                        msg += " has a removed_at_date '%s' before today" % removed_at_date
                        self.reporter.error(
                            path=self.object_path,
                            code='deprecated-date',
                            msg=msg,
                        )
                except ValueError:
                    # This should only happen when removed_at_date is not in ISO format. Since schema
                    # validation already reported this as an error, don't report it a second time.
                    pass
            # Date-based deprecation of aliases.
            deprecated_aliases = data.get('deprecated_aliases', None)
            if deprecated_aliases is not None:
                for deprecated_alias in deprecated_aliases:
                    if 'name' in deprecated_alias and 'date' in deprecated_alias:
                        try:
                            date = deprecated_alias['date']
                            if parse_isodate(date, allow_date=False) < datetime.date.today():
                                msg = "Argument '%s' in argument_spec" % arg
                                if context:
                                    msg += " found in %s" % " -> ".join(context)
                                msg += " has deprecated aliases '%s' with removal date '%s' before today" % (
                                    deprecated_alias['name'], deprecated_alias['date'])
                                self.reporter.error(
                                    path=self.object_path,
                                    code='deprecated-date',
                                    msg=msg,
                                )
                        except ValueError:
                            # This should only happen when deprecated_alias['date'] is not in ISO format. Since
                            # schema validation already reported this as an error, don't report it a second
                            # time.
                            pass
            # Pick the version to compare deprecation removals against:
            # the collection's own version, or ansible-core's for builtins.
            has_version = False
            if self.collection and self.collection_version is not None:
                compare_version = self.collection_version
                version_of_what = "this collection (%s)" % self.collection_version_str
                code_prefix = 'collection'
                has_version = True
            elif not self.collection:
                compare_version = LOOSE_ANSIBLE_VERSION
                version_of_what = "Ansible (%s)" % ansible_version
                code_prefix = 'ansible'
                has_version = True
            # Version-based deprecation of the argument itself.
            removed_in_version = data.get('removed_in_version', None)
            if removed_in_version is not None:
                try:
                    collection_name = data.get('removed_from_collection')
                    removed_in = self._create_version(str(removed_in_version), collection_name=collection_name)
                    if has_version and collection_name == self.collection_name and compare_version >= removed_in:
                        msg = "Argument '%s' in argument_spec" % arg
                        if context:
                            msg += " found in %s" % " -> ".join(context)
                        msg += " has a deprecated removed_in_version %r," % removed_in_version
                        msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
                        self.reporter.error(
                            path=self.object_path,
                            code=code_prefix + '-deprecated-version',
                            msg=msg,
                        )
                except ValueError as e:
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " has an invalid removed_in_version number %r: %s" % (removed_in_version, e)
                    self.reporter.error(
                        path=self.object_path,
                        code='invalid-deprecated-version',
                        msg=msg,
                    )
                except TypeError:
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " has an invalid removed_in_version number %r: " % (removed_in_version, )
                    msg += " error while comparing to version of %s" % version_of_what
                    self.reporter.error(
                        path=self.object_path,
                        code='invalid-deprecated-version',
                        msg=msg,
                    )
            # Version-based deprecation of aliases.
            if deprecated_aliases is not None:
                for deprecated_alias in deprecated_aliases:
                    if 'name' in deprecated_alias and 'version' in deprecated_alias:
                        try:
                            collection_name = deprecated_alias.get('collection_name')
                            version = self._create_version(str(deprecated_alias['version']), collection_name=collection_name)
                            if has_version and collection_name == self.collection_name and compare_version >= version:
                                msg = "Argument '%s' in argument_spec" % arg
                                if context:
                                    msg += " found in %s" % " -> ".join(context)
                                msg += " has deprecated aliases '%s' with removal in version %r," % (
                                    deprecated_alias['name'], deprecated_alias['version'])
                                msg += " i.e. the version is less than or equal to the current version of %s" % version_of_what
                                self.reporter.error(
                                    path=self.object_path,
                                    code=code_prefix + '-deprecated-version',
                                    msg=msg,
                                )
                        except ValueError as e:
                            msg = "Argument '%s' in argument_spec" % arg
                            if context:
                                msg += " found in %s" % " -> ".join(context)
                            msg += " has deprecated aliases '%s' with invalid removal version %r: %s" % (
                                deprecated_alias['name'], deprecated_alias['version'], e)
                            self.reporter.error(
                                path=self.object_path,
                                code='invalid-deprecated-version',
                                msg=msg,
                            )
                        except TypeError:
                            msg = "Argument '%s' in argument_spec" % arg
                            if context:
                                msg += " found in %s" % " -> ".join(context)
                            msg += " has deprecated aliases '%s' with invalid removal version %r:" % (
                                deprecated_alias['name'], deprecated_alias['version'])
                            msg += " error while comparing to version of %s" % version_of_what
                            self.reporter.error(
                                path=self.object_path,
                                code='invalid-deprecated-version',
                                msg=msg,
                            )
            # Alias sanity: not self-referential, no duplicates.
            aliases = data.get('aliases', [])
            if arg in aliases:
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " is specified as its own alias"
                self.reporter.error(
                    path=self.object_path,
                    code='parameter-alias-self',
                    msg=msg
                )
            if len(aliases) > len(set(aliases)):
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " has at least one alias specified multiple times in aliases"
                self.reporter.error(
                    path=self.object_path,
                    code='parameter-alias-repeated',
                    msg=msg
                )
            # 'state' must not be used for read-only operations.
            if not context and arg == 'state':
                bad_states = set(['list', 'info', 'get']) & set(data.get('choices', set()))
                for bad_state in bad_states:
                    self.reporter.error(
                        path=self.object_path,
                        code='parameter-state-invalid-choice',
                        msg="Argument 'state' includes the value '%s' as a choice" % bad_state)
            # Track accepted names (and their aliases), separating out
            # deprecated ones for the documentation cross-check below.
            if not data.get('removed_in_version', None) and not data.get('removed_at_date', None):
                args_from_argspec.add(arg)
                args_from_argspec.update(aliases)
            else:
                deprecated_args_from_argspec.add(arg)
                deprecated_args_from_argspec.update(aliases)
            # Network modules' top-level 'provider' suboptions are recorded
            # and exempted from the docs<->spec checks further down.
            if arg == 'provider' and self.object_path.startswith('lib/ansible/modules/network/'):
                if data.get('options') is not None and not isinstance(data.get('options'), Mapping):
                    self.reporter.error(
                        path=self.object_path,
                        code='invalid-argument-spec-options',
                        msg="Argument 'options' in argument_spec['provider'] must be a dictionary/hash when used",
                    )
                elif data.get('options'):
                    # Record provider options from network modules, for later comparison
                    for provider_arg, provider_data in data.get('options', {}).items():
                        provider_args.add(provider_arg)
                        provider_args.update(provider_data.get('aliases', []))
            if data.get('required') and data.get('default', object) != object:
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " is marked as required but specifies a default. Arguments with a" \
                       " default should not be marked as required"
                self.reporter.error(
                    path=self.object_path,
                    code='no-default-for-required-parameter',
                    msg=msg
                )
            if arg in provider_args:
                # Provider args are being removed from network module top level
                # don't validate docs<->arg_spec checks below
                continue
            # Resolve the type checker for this argument.
            _type = data.get('type', 'str')
            if callable(_type):
                _type_checker = _type
            else:
                _type_checker = DEFAULT_TYPE_VALIDATORS.get(_type)
            _elements = data.get('elements')
            if (_type == 'list') and not _elements:
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " defines type as list but elements is not defined"
                self.reporter.error(
                    path=self.object_path,
                    code='parameter-list-no-elements',
                    msg=msg
                )
            if _elements:
                if not callable(_elements):
                    # NOTE(review): the result of this lookup is discarded —
                    # looks like a leftover; confirm intent before relying on it.
                    DEFAULT_TYPE_VALIDATORS.get(_elements)
                if _type != 'list':
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " defines elements as %s but it is valid only when value of parameter type is list" % _elements
                    self.reporter.error(
                        path=self.object_path,
                        code='parameter-invalid-elements',
                        msg=msg
                    )
            # Spec default must pass the argument's own type checker.
            arg_default = None
            if 'default' in data and not is_empty(data['default']):
                try:
                    with CaptureStd():
                        arg_default = _type_checker(data['default'])
                except (Exception, SystemExit):
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " defines default as (%r) but this is incompatible with parameter type %r" % (data['default'], _type)
                    self.reporter.error(
                        path=self.object_path,
                        code='incompatible-default-type',
                        msg=msg
                    )
                    continue
            # Locate the documentation entry for this arg or its aliases.
            doc_options_args = []
            for alias in sorted(set([arg] + list(aliases))):
                if alias in doc_options:
                    doc_options_args.append(alias)
            if len(doc_options_args) == 0:
                # Undocumented arguments will be handled later (search for undocumented-parameter)
                doc_options_arg = {}
            else:
                doc_options_arg = doc_options[doc_options_args[0]]
                if len(doc_options_args) > 1:
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " with aliases %s is documented multiple times, namely as %s" % (
                        ", ".join([("'%s'" % alias) for alias in aliases]),
                        ", ".join([("'%s'" % alias) for alias in doc_options_args])
                    )
                    self.reporter.error(
                        path=self.object_path,
                        code='parameter-documented-multiple-times',
                        msg=msg
                    )
            # Documented default must also pass the type checker, and must
            # match the spec default.
            try:
                doc_default = None
                if 'default' in doc_options_arg and not is_empty(doc_options_arg['default']):
                    with CaptureStd():
                        doc_default = _type_checker(doc_options_arg['default'])
            except (Exception, SystemExit):
                msg = "Argument '%s' in documentation" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " defines default as (%r) but this is incompatible with parameter type %r" % (doc_options_arg.get('default'), _type)
                self.reporter.error(
                    path=self.object_path,
                    code='doc-default-incompatible-type',
                    msg=msg
                )
                continue
            if arg_default != doc_default:
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " defines default as (%r) but documentation defines default as (%r)" % (arg_default, doc_default)
                self.reporter.error(
                    path=self.object_path,
                    code='doc-default-does-not-match-spec',
                    msg=msg
                )
            # Documented type must match spec type (explicit or implied 'str').
            doc_type = doc_options_arg.get('type')
            if 'type' in data and data['type'] is not None:
                if doc_type is None:
                    if not arg.startswith('_'):  # hidden parameter, for example _raw_params
                        msg = "Argument '%s' in argument_spec" % arg
                        if context:
                            msg += " found in %s" % " -> ".join(context)
                        msg += " defines type as %r but documentation doesn't define type" % (data['type'])
                        self.reporter.error(
                            path=self.object_path,
                            code='parameter-type-not-in-doc',
                            msg=msg
                        )
                elif data['type'] != doc_type:
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " defines type as %r but documentation defines type as %r" % (data['type'], doc_type)
                    self.reporter.error(
                        path=self.object_path,
                        code='doc-type-does-not-match-spec',
                        msg=msg
                    )
            else:
                if doc_type is None:
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " uses default type ('str') but documentation doesn't define type"
                    self.reporter.error(
                        path=self.object_path,
                        code='doc-missing-type',
                        msg=msg
                    )
                elif doc_type != 'str':
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " implies type as 'str' but documentation defines as %r" % doc_type
                    self.reporter.error(
                        path=self.object_path,
                        code='implied-parameter-type-mismatch',
                        msg=msg
                    )
            # Choices: documented and spec choices must each type-check and
            # must contain the same values (order-insensitive).
            doc_choices = []
            try:
                for choice in doc_options_arg.get('choices', []):
                    try:
                        with CaptureStd():
                            doc_choices.append(_type_checker(choice))
                    except (Exception, SystemExit):
                        msg = "Argument '%s' in documentation" % arg
                        if context:
                            msg += " found in %s" % " -> ".join(context)
                        msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
                        self.reporter.error(
                            path=self.object_path,
                            code='doc-choices-incompatible-type',
                            msg=msg
                        )
                        raise StopIteration()
            except StopIteration:
                continue
            arg_choices = []
            try:
                for choice in data.get('choices', []):
                    try:
                        with CaptureStd():
                            arg_choices.append(_type_checker(choice))
                    except (Exception, SystemExit):
                        msg = "Argument '%s' in argument_spec" % arg
                        if context:
                            msg += " found in %s" % " -> ".join(context)
                        msg += " defines choices as (%r) but this is incompatible with argument type %r" % (choice, _type)
                        self.reporter.error(
                            path=self.object_path,
                            code='incompatible-choices',
                            msg=msg
                        )
                        raise StopIteration()
            except StopIteration:
                continue
            if not compare_unordered_lists(arg_choices, doc_choices):
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " defines choices as (%r) but documentation defines choices as (%r)" % (arg_choices, doc_choices)
                self.reporter.error(
                    path=self.object_path,
                    code='doc-choices-do-not-match-spec',
                    msg=msg
                )
            # 'required' must agree between docs and spec.
            doc_required = doc_options_arg.get('required', False)
            data_required = data.get('required', False)
            if (doc_required or data_required) and not (doc_required and data_required):
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                if doc_required:
                    msg += " is not required, but is documented as being required"
                else:
                    msg += " is required, but is not documented as being required"
                self.reporter.error(
                    path=self.object_path,
                    code='doc-required-mismatch',
                    msg=msg
                )
            # 'elements' must agree between docs and spec.
            doc_elements = doc_options_arg.get('elements', None)
            doc_type = doc_options_arg.get('type', 'str')
            data_elements = data.get('elements', None)
            if (doc_elements and not doc_type == 'list'):
                msg = "Argument '%s' " % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " defines parameter elements as %s but it is valid only when value of parameter type is list" % doc_elements
                self.reporter.error(
                    path=self.object_path,
                    code='doc-elements-invalid',
                    msg=msg
                )
            if (doc_elements or data_elements) and not (doc_elements == data_elements):
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                if data_elements:
                    msg += " specifies elements as %s," % data_elements
                else:
                    msg += " does not specify elements,"
                if doc_elements:
                    msg += "but elements is documented as being %s" % doc_elements
                else:
                    msg += "but elements is not documented"
                self.reporter.error(
                    path=self.object_path,
                    code='doc-elements-mismatch',
                    msg=msg
                )
            # Recurse into suboptions with this arg appended to the context.
            spec_suboptions = data.get('options')
            doc_suboptions = doc_options_arg.get('suboptions', {})
            if spec_suboptions:
                if not doc_suboptions:
                    msg = "Argument '%s' in argument_spec" % arg
                    if context:
                        msg += " found in %s" % " -> ".join(context)
                    msg += " has sub-options but documentation does not define it"
                    self.reporter.error(
                        path=self.object_path,
                        code='missing-suboption-docs',
                        msg=msg
                    )
                self._validate_argument_spec({'options': doc_suboptions}, spec_suboptions, kwargs,
                                             context=context + [arg], last_context_spec=data)
        # Accepted argument names must be valid python identifiers.
        for arg in args_from_argspec:
            if not str(arg).isidentifier():
                msg = "Argument '%s' in argument_spec" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " is not a valid python identifier"
                self.reporter.error(
                    path=self.object_path,
                    code='parameter-invalid',
                    msg=msg
                )
        # Cross-check: everything accepted must be documented and vice versa
        # (deprecated args may remain documented without being accepted).
        if docs:
            args_from_docs = set()
            for arg, data in doc_options.items():
                args_from_docs.add(arg)
                args_from_docs.update(data.get('aliases', []))
            args_missing_from_docs = args_from_argspec.difference(args_from_docs)
            docs_missing_from_args = args_from_docs.difference(args_from_argspec | deprecated_args_from_argspec)
            for arg in args_missing_from_docs:
                if arg in provider_args:
                    # Provider args are being removed from network module top level
                    # So they are likely not documented on purpose
                    continue
                msg = "Argument '%s'" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " is listed in the argument_spec, but not documented in the module documentation"
                self.reporter.error(
                    path=self.object_path,
                    code='undocumented-parameter',
                    msg=msg
                )
            for arg in docs_missing_from_args:
                msg = "Argument '%s'" % arg
                if context:
                    msg += " found in %s" % " -> ".join(context)
                msg += " is listed in DOCUMENTATION.options, but not accepted by the module argument_spec"
                self.reporter.error(
                    path=self.object_path,
                    code='nonexistent-parameter-documented',
                    msg=msg
                )
    def _check_for_new_args(self, doc):
        """Compare this module's documented options against the version on
        ``base_branch`` and verify ``version_added`` values.

        Existing options must keep their original version_added (and
        collection); newly added options must use the current ansible X.Y
        version.  Returns the pre-existing DOCUMENTATION dict, or None when
        the comparison cannot be performed.
        """
        if not self.base_branch or self._is_new_module():
            return
        with CaptureStd():
            try:
                existing_doc, dummy_examples, dummy_return, existing_metadata = get_docstring(
                    self.base_module, fragment_loader, verbose=True, collection_name=self.collection_name, is_module=True)
                existing_options = existing_doc.get('options', {}) or {}
            except AssertionError:
                fragment = doc['extends_documentation_fragment']
                self.reporter.warning(
                    path=self.object_path,
                    code='missing-existing-doc-fragment',
                    msg='Pre-existing DOCUMENTATION fragment missing: %s' % fragment
                )
                return
            except Exception as e:
                self.reporter.warning_trace(
                    path=self.object_path,
                    tracebk=e
                )
                self.reporter.warning(
                    path=self.object_path,
                    code='unknown-doc-fragment',
                    msg=('Unknown pre-existing DOCUMENTATION error, see TRACE. Submodule refs may need updated')
                )
                return
        # Module-level version_added of the pre-existing docs ('0.0' when
        # unparseable).
        try:
            mod_collection_name = existing_doc.get('version_added_collection')
            mod_version_added = self._create_strict_version(
                str(existing_doc.get('version_added', '0.0')),
                collection_name=mod_collection_name)
        except ValueError:
            mod_collection_name = self.collection_name
            mod_version_added = self._create_strict_version('0.0')
        options = doc.get('options', {}) or {}
        should_be = '.'.join(ansible_version.split('.')[:2])
        strict_ansible_version = self._create_strict_version(should_be, collection_name='ansible.builtin')
        for option, details in options.items():
            try:
                names = [option] + details.get('aliases', [])
            except (TypeError, AttributeError):
                # Reporting of this syntax error will be handled by schema validation.
                continue
            if any(name in existing_options for name in names):
                # The option already existed. Make sure version_added didn't change.
                for name in names:
                    existing_collection_name = existing_options.get(name, {}).get('version_added_collection')
                    existing_version = existing_options.get(name, {}).get('version_added')
                    if existing_version:
                        break
                current_collection_name = details.get('version_added_collection')
                current_version = details.get('version_added')
                if current_collection_name != existing_collection_name:
                    self.reporter.error(
                        path=self.object_path,
                        code='option-incorrect-version-added-collection',
                        msg=('version_added for existing option (%s) should '
                             'belong to collection %r. Currently belongs to %r' %
                             (option, current_collection_name, existing_collection_name))
                    )
                elif str(current_version) != str(existing_version):
                    self.reporter.error(
                        path=self.object_path,
                        code='option-incorrect-version-added',
                        msg=('version_added for existing option (%s) should '
                             'be %r. Currently %r' %
                             (option, existing_version, current_version))
                    )
                continue
            # New option: its version_added must be the current ansible X.Y
            # (only enforced for options added in this collection).
            try:
                collection_name = details.get('version_added_collection')
                version_added = self._create_strict_version(
                    str(details.get('version_added', '0.0')),
                    collection_name=collection_name)
            except ValueError as e:
                # already reported during schema validation
                continue
            if collection_name != self.collection_name:
                continue
            if (strict_ansible_version != mod_version_added and
                    (version_added < strict_ansible_version or
                     strict_ansible_version < version_added)):
                self.reporter.error(
                    path=self.object_path,
                    code='option-incorrect-version-added',
                    msg=('version_added for new option (%s) should '
                         'be %r. Currently %r' %
                         (option, should_be, version_added))
                )
        return existing_doc
@staticmethod
def is_on_rejectlist(path):
base_name = os.path.basename(path)
file_name = os.path.splitext(base_name)[0]
if file_name.startswith('_') and os.path.islink(path):
return True
if not frozenset((base_name, file_name)).isdisjoint(ModuleValidator.REJECTLIST):
return True
for pat in ModuleValidator.REJECTLIST_PATTERNS:
if fnmatch(base_name, pat):
return True
return False
    def validate(self):
        """Run all module checks and record results in ``self.reporter``.

        Handles python and powershell modules; modules deprecated past the
        current version only get documentation-level checks.
        """
        super(ModuleValidator, self).validate()
        if not self._python_module() and not self._powershell_module():
            self.reporter.error(
                path=self.object_path,
                code='invalid-extension',
                msg=('Official Ansible modules must have a .py '
                     'extension for python modules or a .ps1 '
                     'for powershell modules')
            )
            self._python_module_override = True
        if self._python_module() and self.ast is None:
            self.reporter.error(
                path=self.object_path,
                code='python-syntax-error',
                msg='Python SyntaxError while parsing module'
            )
            # Compile to capture the traceback for the report; further
            # checks are pointless on unparseable source.
            try:
                compile(self.text, self.path, 'exec')
            except Exception:
                self.reporter.trace(
                    path=self.object_path,
                    tracebk=traceback.format_exc()
                )
            return
        end_of_deprecation_should_be_removed_only = False
        if self._python_module():
            doc_info, docs = self._validate_docs()
            # See if current version => deprecated.removed_in, ie, should be docs only
            if docs and docs.get('deprecated', False):
                if 'removed_in' in docs['deprecated']:
                    removed_in = None
                    collection_name = docs['deprecated'].get('removed_from_collection')
                    version = docs['deprecated']['removed_in']
                    if collection_name != self.collection_name:
                        self.reporter.error(
                            path=self.object_path,
                            code='invalid-module-deprecation-source',
                            msg=('The deprecation version for a module must be added in this collection')
                        )
                    else:
                        try:
                            removed_in = self._create_strict_version(str(version), collection_name=collection_name)
                        except ValueError as e:
                            self.reporter.error(
                                path=self.object_path,
                                code='invalid-module-deprecation-version',
                                msg=('The deprecation version %r cannot be parsed: %s' % (version, e))
                            )
                    if removed_in:
                        # Compare against ansible-core or the collection version
                        # depending on where the module lives.
                        if not self.collection:
                            strict_ansible_version = self._create_strict_version(
                                '.'.join(ansible_version.split('.')[:2]), self.collection_name)
                            end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
                            if end_of_deprecation_should_be_removed_only:
                                self.reporter.error(
                                    path=self.object_path,
                                    code='ansible-deprecated-module',
                                    msg='Module is marked for removal in version %s of Ansible when the current version is %s' % (
                                        version, ansible_version),
                                )
                        elif self.collection_version:
                            strict_ansible_version = self.collection_version
                            end_of_deprecation_should_be_removed_only = strict_ansible_version >= removed_in
                            if end_of_deprecation_should_be_removed_only:
                                self.reporter.error(
                                    path=self.object_path,
                                    code='collection-deprecated-module',
                                    msg='Module is marked for removal in version %s of this collection when the current version is %s' % (
                                        version, self.collection_version_str),
                                )
                # handle deprecation by date
                if 'removed_at_date' in docs['deprecated']:
                    try:
                        removed_at_date = docs['deprecated']['removed_at_date']
                        if parse_isodate(removed_at_date, allow_date=True) < datetime.date.today():
                            msg = "Module's deprecated.removed_at_date date '%s' is before today" % removed_at_date
                            self.reporter.error(path=self.object_path, code='deprecated-date', msg=msg)
                    except ValueError:
                        # This happens if the date cannot be parsed. This is already checked by the schema.
                        pass
        # Full code-level checks only for non-deprecated, non-docs-only modules.
        if self._python_module() and not self._just_docs() and not end_of_deprecation_should_be_removed_only:
            self._validate_ansible_module_call(docs)
            self._check_for_sys_exit()
            self._find_rejectlist_imports()
            self._find_module_utils()
            self._find_has_import()
            first_callable = self._get_first_callable()
            self._ensure_imports_below_docs(doc_info, first_callable)
            self._check_for_subprocess()
            self._check_for_os_call()
        if self._powershell_module():
            if self.basename in self.PS_DOC_REJECTLIST:
                return
            self._validate_ps_replacers()
            docs_path = self._find_ps_docs_py_file()
            # We can only validate PowerShell arg spec if it is using the new Ansible.Basic.AnsibleModule util
            pattern = r'(?im)^#\s*ansiblerequires\s+\-csharputil\s*Ansible\.Basic'
            if re.search(pattern, self.text) and self.object_name not in self.PS_ARG_VALIDATE_REJECTLIST:
                with ModuleValidator(docs_path, base_branch=self.base_branch, git_cache=self.git_cache) as docs_mv:
                    docs = docs_mv._validate_docs()[1]
                    self._validate_ansible_module_call(docs)
        self._check_gpl3_header()
        if not self._just_docs() and not end_of_deprecation_should_be_removed_only:
            self._check_interpreter(powershell=self._powershell_module())
            self._check_type_instead_of_isinstance(
                powershell=self._powershell_module()
            )
class PythonPackageValidator(Validator):
    """Validator ensuring a module subdirectory is a proper Python package."""

    REJECTLIST_FILES = frozenset(('__pycache__',))

    def __init__(self, path, reporter=None):
        # Fall back to a fresh Reporter when the caller does not supply one.
        effective_reporter = reporter or Reporter()
        super(PythonPackageValidator, self).__init__(reporter=effective_reporter)
        self.path = path
        self.basename = os.path.basename(path)

    @property
    def object_name(self):
        return self.basename

    @property
    def object_path(self):
        return self.path

    def validate(self):
        super(PythonPackageValidator, self).validate()
        # Cache directories are not packages and need no __init__.py.
        if self.basename in self.REJECTLIST_FILES:
            return
        expected_init = os.path.join(self.path, '__init__.py')
        if os.path.exists(expected_init):
            return
        self.reporter.error(
            path=self.object_path,
            code='subdirectory-missing-init',
            msg='Ansible module subdirectories must contain an __init__.py'
        )
def setup_collection_loader():
    """Install the Ansible collection finder for ANSIBLE_COLLECTIONS_PATH entries."""
    raw_paths = os.environ.get('ANSIBLE_COLLECTIONS_PATH', '')
    _AnsibleCollectionFinder(raw_paths.split(os.pathsep))
def re_compile(value):
    """Compile *value* as a regular expression, raising TypeError on failure.

    Argparse expects invalid argument values to raise TypeError, while
    re.compile raises re.error; this shim translates between the two
    conventions.
    """
    try:
        compiled = re.compile(value)
    except re.error as exc:
        raise TypeError(exc)
    return compiled
def run():
    """Command-line entry point: parse args, validate the given modules and
    directories, and exit with the reporter's status code."""
    parser = argparse.ArgumentParser(prog="validate-modules")
    parser.add_argument('modules', nargs='+',
                        help='Path to module or module directory')
    parser.add_argument('-w', '--warnings', help='Show warnings',
                        action='store_true')
    parser.add_argument('--exclude', help='RegEx exclusion pattern',
                        type=re_compile)
    parser.add_argument('--arg-spec', help='Analyze module argument spec',
                        action='store_true', default=False)
    parser.add_argument('--base-branch', default=None,
                        help='Used in determining if new options were added')
    parser.add_argument('--format', choices=['json', 'plain'], default='plain',
                        help='Output format. Default: "%(default)s"')
    parser.add_argument('--output', default='-',
                        help='Output location, use "-" for stdout. '
                             'Default "%(default)s"')
    parser.add_argument('--collection',
                        help='Specifies the path to the collection, when '
                             'validating files within a collection. Ensure '
                             'that ANSIBLE_COLLECTIONS_PATH is set so the '
                             'contents of the collection can be located')
    parser.add_argument('--collection-version',
                        help='The collection\'s version number used to check '
                             'deprecations')
    args = parser.parse_args()
    # Normalize trailing slashes so directory walking and basename logic behave.
    args.modules = [m.rstrip('/') for m in args.modules]
    reporter = Reporter()
    git_cache = GitCache(args.base_branch)
    # Directories that must later be checked for a missing __init__.py.
    check_dirs = set()
    routing = None
    if args.collection:
        setup_collection_loader()
        routing_file = 'meta/runtime.yml'
        # Load meta/runtime.yml if it exists, as it may contain deprecation information
        if os.path.isfile(routing_file):
            try:
                with open(routing_file) as f:
                    routing = yaml.safe_load(f)
            except yaml.error.MarkedYAMLError as ex:
                # Marked errors carry a position; report it 1-indexed.
                print('%s:%d:%d: YAML load failed: %s' % (routing_file, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex))))
            except Exception as ex:  # pylint: disable=broad-except
                print('%s:%d:%d: YAML load failed: %s' % (routing_file, 0, 0, re.sub(r'\s+', ' ', str(ex))))
    for module in args.modules:
        # A file argument is validated directly; a directory argument is
        # handled by the os.walk() below (which yields nothing for files).
        if os.path.isfile(module):
            path = module
            if args.exclude and args.exclude.search(path):
                continue
            if ModuleValidator.is_on_rejectlist(path):
                continue
            with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
                                 analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
                                 git_cache=git_cache, reporter=reporter, routing=routing) as mv1:
                mv1.validate()
            check_dirs.add(os.path.dirname(path))
        for root, dirs, files in os.walk(module):
            # First path component of root relative to the argument.
            basedir = root[len(module) + 1:].split('/', 1)[0]
            if basedir in REJECTLIST_DIRS:
                continue
            for dirname in dirs:
                if root == module and dirname in REJECTLIST_DIRS:
                    continue
                path = os.path.join(root, dirname)
                if args.exclude and args.exclude.search(path):
                    continue
                check_dirs.add(path)
            for filename in files:
                path = os.path.join(root, filename)
                if args.exclude and args.exclude.search(path):
                    continue
                if ModuleValidator.is_on_rejectlist(path):
                    continue
                with ModuleValidator(path, collection=args.collection, collection_version=args.collection_version,
                                     analyze_arg_spec=args.arg_spec, base_branch=args.base_branch,
                                     git_cache=git_cache, reporter=reporter, routing=routing) as mv2:
                    mv2.validate()
    # Package-layout checks only apply to core modules, not collections.
    if not args.collection:
        for path in sorted(check_dirs):
            pv = PythonPackageValidator(path, reporter=reporter)
            pv.validate()
    if args.format == 'plain':
        sys.exit(reporter.plain(warnings=args.warnings, output=args.output))
    else:
        sys.exit(reporter.json(warnings=args.warnings, output=args.output))
class GitCache:
    """Caches `git ls-tree` listings of lib/ansible/modules/ for HEAD and,
    optionally, a base branch, with filesystem fallbacks when git or the
    repository is unavailable."""
    def __init__(self, base_branch):
        self.base_branch = base_branch
        if self.base_branch:
            self.base_tree = self._git(['ls-tree', '-r', '--name-only', self.base_branch, 'lib/ansible/modules/'])
        else:
            self.base_tree = []
        try:
            self.head_tree = self._git(['ls-tree', '-r', '--name-only', 'HEAD', 'lib/ansible/modules/'])
        except GitError as ex:
            if ex.status == 128:
                # fallback when there is no .git directory
                self.head_tree = self._get_module_files()
            else:
                raise
        except OSError as ex:
            if ex.errno == errno.ENOENT:
                # fallback when git is not installed
                self.head_tree = self._get_module_files()
            else:
                raise
        # Map module basename -> repo path on the base branch; only Python and
        # PowerShell sources count, and __init__.py is not a module.
        self.base_module_paths = dict((os.path.basename(p), p) for p in self.base_tree if os.path.splitext(p)[1] in ('.py', '.ps1'))
        self.base_module_paths.pop('__init__.py', None)
        self.head_aliased_modules = set()
        for path in self.head_tree:
            filename = os.path.basename(path)
            if filename.startswith('_') and filename != '__init__.py':
                # Leading-underscore symlinks are module aliases; record the
                # real module each one points at.
                if os.path.islink(path):
                    self.head_aliased_modules.add(os.path.basename(os.path.realpath(path)))
    @staticmethod
    def _get_module_files():
        """Walk lib/ansible/modules/ directly; fallback for when git cannot
        be used to list the tree."""
        module_files = []
        for (dir_path, dir_names, file_names) in os.walk('lib/ansible/modules/'):
            for file_name in file_names:
                module_files.append(os.path.join(dir_path, file_name))
        return module_files
    @staticmethod
    def _git(args):
        """Run a git command and return its stdout as a list of lines.

        Raises GitError carrying the process exit status on failure.
        """
        cmd = ['git'] + args
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        if p.returncode != 0:
            raise GitError(stderr, p.returncode)
        return stdout.decode('utf-8').splitlines()
class GitError(Exception):
    """Raised when an invoked git command exits with a non-zero status."""

    def __init__(self, message, status):
        # Expose the numeric exit code separately so callers can branch on
        # it (e.g. 128 means "not a git repository").
        self.status = status
        super(GitError, self).__init__(message)
def main():
    """Entry point wrapper: run the validator, exiting quietly on Ctrl-C."""
    try:
        run()
    except KeyboardInterrupt:
        # A user interrupt is not an error; suppress the traceback.
        pass
| gpl-3.0 |
Greennut/ostproject | django/middleware/transaction.py | 143 | 2311 | from django.db import transaction
class TransactionMiddleware(object):
    """
    Transaction middleware. If this is enabled, each view function will be run
    with commit_on_response activated - that way a save() doesn't do a direct
    commit, the commit is done when a successful response is created. If an
    exception happens, the database is rolled back.
    """
    def process_request(self, request):
        """Enters transaction management"""
        transaction.enter_transaction_management()
        transaction.managed(True)
    def process_exception(self, request, exception):
        """Rolls back the database and leaves transaction management"""
        # `exception` is required by the middleware interface but unused here.
        if transaction.is_dirty():
            # This rollback might fail because of network failure for example.
            # If rollback isn't possible it is impossible to clean the
            # connection's state. So leave the connection in dirty state and
            # let request_finished signal deal with cleaning the connection.
            transaction.rollback()
        transaction.leave_transaction_management()
    def process_response(self, request, response):
        """Commits and leaves transaction management."""
        if transaction.is_managed():
            if transaction.is_dirty():
                # Note: it is possible that the commit fails. If the reason is
                # closed connection or some similar reason, then there is
                # little hope to proceed nicely. However, in some cases (
                # deferred foreign key checks for example) it is still possible
                # to rollback().
                try:
                    transaction.commit()
                except Exception:
                    # If the rollback fails, the transaction state will be
                    # messed up. It doesn't matter, the connection will be set
                    # to clean state after the request finishes. And, we can't
                    # clean the state here properly even if we wanted to, the
                    # connection is in transaction but we can't rollback...
                    transaction.rollback()
                    transaction.leave_transaction_management()
                    raise
            transaction.leave_transaction_management()
        return response
| bsd-3-clause |
iTeam-org/iteam-site | iTeam/events/forms.py | 2 | 3827 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Adrien Chardon
# @Date: 2014-08-21 18:54:29
# @Last Modified by: Adrien Chardon
# @Last Modified time: 2014-12-04 19:41:49
# This file is part of iTeam.org.
# Copyright (C) 2014 Adrien Chardon (Nodraak).
#
# iTeam.org is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# iTeam.org is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with iTeam.org. If not, see <http://www.gnu.org/licenses/>.
from django import forms
from django.conf import settings
class EventForm(forms.Form):
    """Formulaire de création/édition d'un événement.

    Validates image/file sizes against the settings limits and rejects
    titles containing forbidden words.
    """
    title = forms.CharField(
        label='Titre',
        widget=forms.TextInput(
            attrs={
                'autofocus': '',
                'placeholder': 'Titre'
            }
        )
    )
    place = forms.CharField(
        label='Lieu',
        widget=forms.TextInput(
            attrs={
                'placeholder': 'Lieu'
            }
        ),
        required=False
    )
    date_start = forms.DateTimeField(
        label='Date de debut',
        widget=forms.DateTimeInput(
            attrs={
                'placeholder': 'Date de début : jj/mm/aaaa hh:mm'
            },
            # Bug fix: %M is minutes; %m (month) rendered e.g. "14:09" as
            # the month instead of the minutes.
            format='%d/%m/%Y %H:%M',
        )
    )
    image = forms.ImageField(
        required=False
    )
    type = forms.ChoiceField(
        label='Type d\'événement',
        widget=forms.RadioSelect,
        choices=settings.EVENTS_MODEL_TYPES,
        initial='O',
        required=False,
    )
    is_draft = forms.ChoiceField(
        label='Status de l\'événement',
        widget=forms.RadioSelect,
        choices=settings.MODEL_IS_DRAFT,
        initial='1',
        required=False,
    )
    text = forms.CharField(
        label='Texte',
        widget=forms.Textarea(
            attrs={
                'placeholder': 'Texte',
                'rows': '15'
            }
        )
    )
    file = forms.FileField(
        label=u'Fichier attaché',
        allow_empty_file=True,
        required=False,
    )
    def clean(self):
        """Cross-field validation: size limits and forbidden words."""
        cleaned_data = super(EventForm, self).clean()
        img = cleaned_data.get('image')
        file = cleaned_data.get('file')
        if img and img.size > settings.SIZE_MAX_IMG:
            msg = (
                u'Fichier trop lourd (%d Ko / %d Ko max). Pour ne pas saturer le serveur, merci '
                u'de réduire la résolution de l\'image.') % (img.size/1024, settings.SIZE_MAX_IMG/1024)
            self._errors['image'] = self.error_class([msg])
            if 'image' in cleaned_data:
                del cleaned_data['image']
        if file and file.size > settings.SIZE_MAX_FILE:
            # Bug fix: the message previously reported img.size (crashing when
            # no image was uploaded); it must report the attached file's size.
            msg = (
                u'Fichier trop lourd (%d Ko / %d Ko max). Pour ne pas saturer le serveur, merci '
                u'de réduire la taille du fichier.') % (file.size/1024, settings.SIZE_MAX_FILE/1024)
            self._errors['file'] = self.error_class([msg])
            if 'file' in cleaned_data:
                del cleaned_data['file']
        # `title` is absent from cleaned_data when its own field validation
        # failed; guard against None before substring checks.
        title = cleaned_data.get('title') or ''
        bad_word = any(word in title for word in settings.FORBIDDEN_WORDS)
        if bad_word:
            msg = ('Erreur, un mot interdit a été utilisé. Regardez les sources ou contacter le dev.')
            self._errors['title'] = self.error_class([msg])
        return cleaned_data
| agpl-3.0 |
dcos/dcos | packages/dcos-integration-test/extra/test_misc.py | 1 | 1687 | # Various tests that don't fit into the other categories and don't make their own really.
import os
import yaml
from test_helpers import get_expanded_config
# Ownership metadata for this test module (presumably consumed by DC/OS
# test triage tooling -- TODO confirm).
__maintainer__ = 'branden'
__contact__ = 'dcos-cluster-ops@mesosphere.io'
# Test that user config is loadable
# TODO(cmaloney): Validate it contains some settings we expect.
def test_load_user_config() -> None:
    """The on-host user config must parse as YAML and exclude derived values."""
    with open('/opt/mesosphere/etc/user.config.yaml', 'r') as config_file:
        user_config = yaml.safe_load(config_file)
    # 'master_quorum' is calculated at install time and must never appear in
    # the user-supplied configuration.
    assert 'master_quorum' not in user_config
# TODO(cmaloney): Test user provided parameters are present. All the
# platforms have different sets...
def test_expanded_config() -> None:
    """The expanded config must contain calculated and defined parameters."""
    expanded_config = get_expanded_config()
    # Calculated parameter should be present.
    assert 'master_quorum' in expanded_config
    # Defined-and-used parameters should be present.
    for key in ('marathon_port', 'mesos_master_port', 'mesos_agent_port',
                'exhibitor_port', 'mesos_dns_port', 'metronome_port'):
        assert key in expanded_config
# TODO(cmaloney): Test user provided parameters are present. All the
# platforms have different sets...
def test_profile_symlink() -> None:
    """Assert the DC/OS profile script is symlinked from the correct source."""
    expanded_config = get_expanded_config()
    target = expanded_config['profile_symlink_target']
    expected_source = expanded_config['profile_symlink_source']
    assert os.readlink(target) == expected_source
| apache-2.0 |
plusvaliamarket/storable | node_modules/node-gyp/gyp/pylib/gyp/generator/dump_dependency_json.py | 1534 | 3426 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import os
import gyp
import gyp.common
import gyp.msvs_emulation
import json
import sys
# Capability flags queried by the gyp core.
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = False
# Populated by CalculateGeneratorInputInfo() below.
generator_filelist_paths = {
}
generator_default_variables = {
}
for dirname in ['INTERMEDIATE_DIR', 'SHARED_INTERMEDIATE_DIR', 'PRODUCT_DIR',
                'LIB_DIR', 'SHARED_LIB_DIR']:
  # Some gyp steps fail if these are empty(!).
  generator_default_variables[dirname] = 'dir'
for unused in ['RULE_INPUT_PATH', 'RULE_INPUT_ROOT', 'RULE_INPUT_NAME',
               'RULE_INPUT_DIRNAME', 'RULE_INPUT_EXT',
               'EXECUTABLE_PREFIX', 'EXECUTABLE_SUFFIX',
               'STATIC_LIB_PREFIX', 'STATIC_LIB_SUFFIX',
               'SHARED_LIB_PREFIX', 'SHARED_LIB_SUFFIX',
               'CONFIGURATION_NAME']:
  # Placeholder values: this generator only dumps the dependency graph, so
  # path/rule variables never influence its output.
  generator_default_variables[unused] = ''
def CalculateVariables(default_variables, params):
  """Seed default_variables from generator flags and the target OS flavor."""
  generator_flags = params.get('generator_flags', {})
  for key, val in generator_flags.items():
    default_variables.setdefault(key, val)
  default_variables.setdefault('OS', gyp.common.GetFlavor(params))
  flavor = gyp.common.GetFlavor(params)
  if flavor =='win':
    # Copy additional generator configuration data from VS, which is shared
    # by the Windows Ninja generator.
    import gyp.generator.msvs as msvs_generator
    # NOTE(review): the two getattr() results below are bound to locals and
    # never used in this function -- possibly vestigial; confirm before
    # removing.
    generator_additional_non_configuration_keys = getattr(msvs_generator,
        'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(msvs_generator,
        'generator_additional_path_sections', [])
    gyp.msvs_emulation.CalculateCommonVariables(default_variables, params)
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True

  options = params['options']
  toplevel = options.toplevel_dir
  generator_dir = os.path.relpath(options.generator_output or '.')
  # output_dir: relative path from generator_dir to the build directory.
  output_dir = generator_flags.get('output_dir', 'out')
  qualified_out_dir = os.path.normpath(
      os.path.join(toplevel, generator_dir, output_dir, 'gypfiles'))

  global generator_filelist_paths
  generator_filelist_paths = {
      'toplevel': toplevel,
      'qualified_out_dir': qualified_out_dir,
  }
def GenerateOutput(target_list, target_dicts, data, params):
  """Write a JSON map of target -> direct dependencies to <output_dir>/dump.json.

  The graph is walked from target_list so every transitively reachable
  target gets an entry.  `data` is unused but required by the generator
  interface.
  """
  # Map of target -> list of targets it depends on.
  edges = {}

  # Queue of targets to visit.
  targets_to_visit = target_list[:]

  while len(targets_to_visit) > 0:
    target = targets_to_visit.pop()
    if target in edges:
      continue
    edges[target] = []
    for dep in target_dicts[target].get('dependencies', []):
      edges[target].append(dep)
      targets_to_visit.append(dep)

  # Missing generator_flags or output_dir falls back to the current directory
  # (same effect as the generator's other params.get() lookups).
  filepath = params.get('generator_flags', {}).get('output_dir', '.')
  filename = os.path.join(filepath, 'dump.json')
  # Context manager guarantees the file is closed even if serialization fails
  # (the previous open/close pair leaked the handle on error).
  with open(filename, 'w') as f:
    json.dump(edges, f)
  # Parenthesized print works under both Python 2 and Python 3.
  print('Wrote json to %s.' % filename)
| apache-2.0 |
Lh4cKg/brython | www/src/Lib/test/unittests/test_trace.py | 109 | 14823 | import os
import io
import sys
from test.support import (run_unittest, TESTFN, rmtree, unlink,
captured_stdout)
import unittest
import trace
from trace import CoverageResults, Trace
from test.tracedmodules import testmod
#------------------------------- Utilities -----------------------------------#
def fix_ext_py(filename):
    """Given a .pyc/.pyo filename converts it to the appropriate .py"""
    for cached_ext in ('.pyc', '.pyo'):
        if filename.endswith(cached_ext):
            # Dropping the trailing 'c'/'o' leaves the .py source path.
            return filename[:-1]
    return filename
def my_file_and_modname():
    """The .py file and module name of this file (__file__)"""
    # Map a cached .pyc/.pyo path back to its source file (same rule as
    # fix_ext_py, inlined here).
    source = __file__[:-1] if __file__.endswith(('.pyc', '.pyo')) else __file__
    modname = os.path.splitext(os.path.basename(__file__))[0]
    return source, modname
def get_firstlineno(func):
    # Line number of the function's `def` statement in its source file.
    code_obj = func.__code__
    return code_obj.co_firstlineno
#-------------------- Target functions for tracing ---------------------------#
#
# The relative line numbers of lines in these functions matter for verifying
# tracing. Please modify the appropriate tests if you change one of the
# functions. Absolute line numbers don't matter.
#
# Straight-line target: each body line should be traced exactly once.
# (Body line offsets must not change -- tests assert on firstlineno + i.)
def traced_func_linear(x, y):
    a = x
    b = y
    c = a + b
    return c
# Loop target: the loop header/body lines are traced multiple times.
# (Body line offsets must not change -- tests assert on firstlineno + i.)
def traced_func_loop(x, y):
    c = x
    for i in range(5):
        c += y
    return c
# Target that calls into the separately-traced test.tracedmodules.testmod.
def traced_func_importing(x, y):
    return x + y + testmod.func(1)
# Caller target: exercises function-call tracing via traced_func_linear.
def traced_func_simple_caller(x):
    c = traced_func_linear(x, x)
    return c + x
# Caller target that fans out to both local and importing traced functions.
def traced_func_importing_caller(x):
    k = traced_func_simple_caller(x)
    k += traced_func_importing(k, x)
    return k
# Generator target: yields are traced once per resumption.
# (Body line offsets must not change -- tests assert on firstlineno + i.)
def traced_func_generator(num):
    c = 5  # executed once
    for i in range(num):
        yield i + c
# Consumes traced_func_generator; driver for the generator-tracing test.
def traced_func_calling_generator():
    k = 0
    for i in traced_func_generator(10):
        k += i
# Single-line helper invoked from the list-comprehension target below.
def traced_doubler(num):
    return num * 2
# List-comprehension target; the comprehension line count differs between
# Python 2 and 3 (see TestLineCounts.test_trace_list_comprehension).
def traced_caller_list_comprehension():
    k = 10
    mylist = [traced_doubler(i) for i in range(k)]
    return mylist
# Class whose methods serve as tracing targets.  Method body line offsets
# must not change: the tests assert on firstlineno + offset.
class TracedClass(object):
    def __init__(self, x):
        self.a = x
    def inst_method_linear(self, y):
        return self.a + y
    def inst_method_calling(self, x):
        c = self.inst_method_linear(x)
        return c + traced_func_linear(x, c)
    @classmethod
    def class_method_linear(cls, y):
        return y * 2
    @staticmethod
    def static_method_linear(y):
        return y * 2
#------------------------------ Test cases -----------------------------------#
class TestLineCounts(unittest.TestCase):
    """White-box testing of line-counting, via runfunc"""
    def setUp(self):
        # Restore any pre-existing trace function when the test finishes.
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        self.my_py_filename = fix_ext_py(__file__)
    def test_traced_func_linear(self):
        result = self.tracer.runfunc(traced_func_linear, 2, 5)
        self.assertEqual(result, 7)
        # all lines are executed once
        expected = {}
        firstlineno = get_firstlineno(traced_func_linear)
        for i in range(1, 5):
            expected[(self.my_py_filename, firstlineno + i)] = 1
        self.assertEqual(self.tracer.results().counts, expected)
    def test_traced_func_loop(self):
        self.tracer.runfunc(traced_func_loop, 2, 3)
        firstlineno = get_firstlineno(traced_func_loop)
        # Loop header runs 6 times (5 iterations + exhaustion check).
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }
        self.assertEqual(self.tracer.results().counts, expected)
    def test_traced_func_importing(self):
        self.tracer.runfunc(traced_func_importing, 2, 5)
        firstlineno = get_firstlineno(traced_func_importing)
        # Lines executed inside the imported testmod are counted too.
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (fix_ext_py(testmod.__file__), 2): 1,
            (fix_ext_py(testmod.__file__), 3): 1,
        }
        self.assertEqual(self.tracer.results().counts, expected)
    def test_trace_func_generator(self):
        self.tracer.runfunc(traced_func_calling_generator)
        firstlineno_calling = get_firstlineno(traced_func_calling_generator)
        firstlineno_gen = get_firstlineno(traced_func_generator)
        expected = {
            (self.my_py_filename, firstlineno_calling + 1): 1,
            (self.my_py_filename, firstlineno_calling + 2): 11,
            (self.my_py_filename, firstlineno_calling + 3): 10,
            (self.my_py_filename, firstlineno_gen + 1): 1,
            (self.my_py_filename, firstlineno_gen + 2): 11,
            (self.my_py_filename, firstlineno_gen + 3): 10,
        }
        self.assertEqual(self.tracer.results().counts, expected)
    def test_trace_list_comprehension(self):
        self.tracer.runfunc(traced_caller_list_comprehension)
        firstlineno_calling = get_firstlineno(traced_caller_list_comprehension)
        firstlineno_called = get_firstlineno(traced_doubler)
        expected = {
            (self.my_py_filename, firstlineno_calling + 1): 1,
            # List comprehensions work differently in 3.x, so the count
            # below changed compared to 2.x.
            (self.my_py_filename, firstlineno_calling + 2): 12,
            (self.my_py_filename, firstlineno_calling + 3): 1,
            (self.my_py_filename, firstlineno_called + 1): 10,
        }
        self.assertEqual(self.tracer.results().counts, expected)
    def test_linear_methods(self):
        # XXX todo: later add 'static_method_linear' and 'class_method_linear'
        # here, once issue1764286 is resolved
        #
        for methname in ['inst_method_linear',]:
            tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
            traced_obj = TracedClass(25)
            method = getattr(traced_obj, methname)
            tracer.runfunc(method, 20)
            firstlineno = get_firstlineno(method)
            expected = {
                (self.my_py_filename, firstlineno + 1): 1,
            }
            self.assertEqual(tracer.results().counts, expected)
class TestRunExecCounts(unittest.TestCase):
    """A simple sanity test of line-counting, via runctx (exec)"""
    def setUp(self):
        self.my_py_filename = fix_ext_py(__file__)
        # Restore any pre-existing trace function when the test finishes.
        self.addCleanup(sys.settrace, sys.gettrace())
    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        code = r'''traced_func_loop(2, 5)'''
        code = compile(code, __file__, 'exec')
        self.tracer.runctx(code, globals(), vars())
        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }
        # When used through 'run', some other spurious counts are produced, like
        # the settrace of threading, which we ignore, just making sure that the
        # counts for traced_func_loop were right.
        #
        for k in expected.keys():
            self.assertEqual(self.tracer.results().counts[k], expected[k])
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""
    def setUp(self):
        # Restore any pre-existing trace function when the test finishes.
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()
    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)
        # Keys are (file, modname, funcname) tuples.
        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)
        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
            self.filemod + ('traced_func_importing_caller',): 1,
            self.filemod + ('traced_func_importing',): 1,
            (fix_ext_py(testmod.__file__), 'testmod', 'func'): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)
        expected = {
            self.filemod + ('TracedClass.inst_method_calling',): 1,
            self.filemod + ('TracedClass.inst_method_linear',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
class TestCallers(unittest.TestCase):
    """White-box testing of callers tracing"""
    def setUp(self):
        # Restore any pre-existing trace function when the test finishes.
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countcallers=1)
        self.filemod = my_file_and_modname()
    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)
        # Keys are (caller, callee) pairs of (file, modname, funcname) tuples.
        expected = {
            ((os.path.splitext(trace.__file__)[0] + '.py', 'trace', 'Trace.runfunc'),
                (self.filemod + ('traced_func_importing_caller',))): 1,
            ((self.filemod + ('traced_func_simple_caller',)),
                (self.filemod + ('traced_func_linear',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_simple_caller',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_importing',))): 1,
            ((self.filemod + ('traced_func_importing',)),
                (fix_ext_py(testmod.__file__), 'testmod', 'func')): 1,
        }
        self.assertEqual(self.tracer.results().callers, expected)
# Created separately for issue #3821
class TestCoverage(unittest.TestCase):
    """End-to-end coverage-report tests (created separately for issue #3821)."""
    def setUp(self):
        # Restore any pre-existing trace function when the test finishes.
        self.addCleanup(sys.settrace, sys.gettrace())
    def tearDown(self):
        rmtree(TESTFN)
        unlink(TESTFN)
    def _coverage(self, tracer,
                  cmd='from test import test_pprint; test_pprint.test_main()'):
        # Run `cmd` under the tracer and write *.cover files into TESTFN.
        tracer.run(cmd)
        r = tracer.results()
        r.write_results(show_missing=True, summary=True, coverdir=TESTFN)
    def test_coverage(self):
        tracer = trace.Trace(trace=0, count=1)
        with captured_stdout() as stdout:
            self._coverage(tracer)
        stdout = stdout.getvalue()
        self.assertTrue("pprint.py" in stdout)
        self.assertTrue("case.py" in stdout)   # from unittest
        files = os.listdir(TESTFN)
        self.assertTrue("pprint.cover" in files)
        self.assertTrue("unittest.case.cover" in files)
    def test_coverage_ignore(self):
        # Ignore all files, nothing should be traced nor printed
        libpath = os.path.normpath(os.path.dirname(os.__file__))
        # sys.prefix does not work when running from a checkout
        tracer = trace.Trace(ignoredirs=[sys.base_prefix, sys.base_exec_prefix,
                                         libpath], trace=0, count=1)
        with captured_stdout() as stdout:
            self._coverage(tracer)
        if os.path.exists(TESTFN):
            files = os.listdir(TESTFN)
            self.assertEqual(files, ['_importlib.cover'])  # Ignore __import__
    def test_issue9936(self):
        tracer = trace.Trace(trace=0, count=1)
        modname = 'test.tracedmodules.testmod'
        # Ensure that the module is executed in import
        if modname in sys.modules:
            del sys.modules[modname]
        cmd = ("import test.tracedmodules.testmod as t;"
               "t.func(0); t.func2();")
        with captured_stdout() as stdout:
            self._coverage(tracer, cmd)
        # Skip the summary header line, then parse "lines cov% module" rows.
        stdout.seek(0)
        stdout.readline()
        coverage = {}
        for line in stdout:
            lines, cov, module = line.split()[:3]
            coverage[module] = (int(lines), int(cov[:-1]))
        # XXX This is needed to run regrtest.py as a script
        modname = trace._fullmodname(sys.modules[modname].__file__)
        self.assertIn(modname, coverage)
        self.assertEqual(coverage[modname], (5, 100))
### Tests that don't mess with sys.settrace and can be traced
### themselves TODO: Skip tests that do mess with sys.settrace when
### regrtest is invoked with -T option.
class Test_Ignore(unittest.TestCase):
    """Tests for trace._Ignore module/directory filtering."""
    def test_ignored(self):
        jn = os.path.join
        ignore = trace._Ignore(['x', 'y.z'], [jn('foo', 'bar')])
        self.assertTrue(ignore.names('x.py', 'x'))
        self.assertFalse(ignore.names('xy.py', 'xy'))
        self.assertFalse(ignore.names('y.py', 'y'))
        self.assertTrue(ignore.names(jn('foo', 'bar', 'baz.py'), 'baz'))
        self.assertFalse(ignore.names(jn('bar', 'z.py'), 'z'))
        # Matched before.  (_Ignore caches by module name, so 'baz' is now
        # ignored regardless of its directory -- keep this assert last.)
        self.assertTrue(ignore.names(jn('bar', 'baz.py'), 'baz'))
class TestDeprecatedMethods(unittest.TestCase):
    """Each deprecated trace-module API must still work but warn."""
    def test_deprecated_usage(self):
        sio = io.StringIO()
        with self.assertWarns(DeprecationWarning):
            trace.usage(sio)
        self.assertIn('Usage:', sio.getvalue())
    def test_deprecated_Ignore(self):
        with self.assertWarns(DeprecationWarning):
            trace.Ignore()
    def test_deprecated_modname(self):
        with self.assertWarns(DeprecationWarning):
            self.assertEqual("spam", trace.modname("spam"))
    def test_deprecated_fullmodname(self):
        with self.assertWarns(DeprecationWarning):
            self.assertEqual("spam", trace.fullmodname("spam"))
    def test_deprecated_find_lines_from_code(self):
        with self.assertWarns(DeprecationWarning):
            def foo():
                pass
            trace.find_lines_from_code(foo.__code__, ["eggs"])
    def test_deprecated_find_lines(self):
        with self.assertWarns(DeprecationWarning):
            def foo():
                pass
            trace.find_lines(foo.__code__, ["eggs"])
    def test_deprecated_find_strings(self):
        # Create an empty TESTFN file for the helper to scan.
        with open(TESTFN, 'w') as fd:
            self.addCleanup(unlink, TESTFN)
        with self.assertWarns(DeprecationWarning):
            trace.find_strings(fd.name)
    def test_deprecated_find_executable_linenos(self):
        with open(TESTFN, 'w') as fd:
            self.addCleanup(unlink, TESTFN)
        with self.assertWarns(DeprecationWarning):
            trace.find_executable_linenos(fd.name)
def test_main():
    """Run every test case defined in this module via regrtest's helper."""
    run_unittest(__name__)
if __name__ == '__main__':
    # Support running this test file directly, outside of regrtest.
    test_main()
| bsd-3-clause |
Sofia2/python-api | src/ssap/utils/logs.py | 1 | 1849 | # -*- coding: utf8 -*-
'''
Python SSAP API
Version 1.5
© Indra Sistemas, S.A.
2014 SPAIN
All rights reserved
'''
import logging
import sys
class LogFactory(object):
    '''
    Builds and configures Python loggers for the SSAP API.
    '''

    # One log line: timestamp, logger name, level, message.
    __line_pattern = '%(asctime)s %(name)s [%(levelname)s] - %(message)s'

    DEFAULT_LOG_FILE = ""

    @staticmethod
    def __getLogger(obj, level):
        '''
        Creates and returns a logger. The logger will be named after the class that uses it.

        Keyword arguments:
        obj -- an instance of the class that will use the logger.
        level -- the minimum logging level (i.e. "DEBUG", "INFO",...).
        '''
        named_logger = logging.getLogger(type(obj).__name__)
        named_logger.setLevel(level)
        return named_logger

    @staticmethod
    def __getHandler(output_file=""):
        '''
        Configures a logging handler.

        Keyword arguments:
        output_file -- the output file that the logger will use; an empty
                       string selects stdout instead.
        '''
        if len(output_file) != 0:
            handler = logging.FileHandler(output_file)
        else:
            handler = logging.StreamHandler(sys.stdout)
        handler.setFormatter(logging.Formatter(LogFactory.__line_pattern))
        return handler

    @staticmethod
    def configureLogger(obj, level, logfile):
        '''
        Configures a logger

        Keyword arguments:
        obj -- the object that will use the logger.
        level -- the minimum logging level (i.e. "DEBUG", "INFO",...).
        logfile -- the output file that the logger will use.
        '''
        configured = LogFactory.__getLogger(obj, level)
        configured.addHandler(LogFactory.__getHandler(logfile))
        return configured
googed/banner-bot | banner updater.py | 1 | 21874 |
# The main subreddit's sidebar must include strings to denote the beginning and ending location of the list, the bot will not update the sidebar if these strings are not present
# With the default delimiters the sidebar should include a chunk of text like:
# [](#banner_start)
# banner text here
# [](#banner_end)
from ConfigParser import SafeConfigParser
from datetime import datetime, timedelta
import HTMLParser
import logging, logging.config, re, sys, os
from time import time
from dateutil import parser, rrule, tz
import praw
from requests.exceptions import HTTPError
from sqlalchemy import create_engine
from sqlalchemy import Boolean, Column, DateTime, String, Text, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound
import yaml
import random
import requests
from imgurpython import ImgurClient
# global reddit session
r = None
# Read the bot configuration from schedulebot.cfg located next to this script.
cfg_file = SafeConfigParser()
path_to_cfg = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_cfg = os.path.join(path_to_cfg, 'schedulebot.cfg')
cfg_file.read(path_to_cfg)
# Build the SQLAlchemy engine URL; sqlite needs no host or credentials.
if cfg_file.get('database', 'system').lower() == 'sqlite':
    engine = create_engine(
        cfg_file.get('database', 'system')+':///'+\
        cfg_file.get('database', 'database'))
else:
    engine = create_engine(
        cfg_file.get('database', 'system')+'://'+\
        cfg_file.get('database', 'username')+':'+\
        cfg_file.get('database', 'password')+'@'+\
        cfg_file.get('database', 'host')+'/'+\
        cfg_file.get('database', 'database'))
print "engine running..."
Base = declarative_base()
# A single session is created eagerly at import time and shared module-wide.
Session = sessionmaker(bind=engine, expire_on_commit=False)
session = Session()
class Subreddit(Base):
    """Table containing the subreddits for the bot to monitor.

    name - The subreddit's name. "gaming", not "/r/gaming".
    enabled - Subreddit schedule will not be executed if 0
              (stored as an Integer flag, default 1)
    schedule_yaml - YAML definition of the subreddit's schedule
    updated - Time that the subreddit was last updated (UTC)
    banner_limit - presumably the max number of banners used per
                   update -- TODO confirm against the update logic
    banner_name - base name used for the banner entry (default 'banner')
    """
    __tablename__ = 'schedule'
    name = Column(Text, nullable=False, primary_key=True)
    enabled = Column(Integer, nullable=False, default=1)
    schedule_yaml = Column(Text)
    updated = Column(Integer, nullable=False)
    banner_limit = Column(Integer, nullable = False, default=1)
    banner_name = Column(Text, nullable = False, default='banner')
class ScheduledEvent(object):
    """One banner-rotation event parsed from a wiki YAML section.

    Carries the event's start time (`first`), optional recurrence
    (`repeat` or `rrule`), imgur album `url` and sidebar `title`, and
    knows how to decide when it is due and how to apply itself to a
    subreddit (upload images, rewrite the sidebar).
    """

    # Optional keys with their defaults; validate_keys() also treats
    # these names as the set of allowed optional variables.
    _defaults = {'repeat': None,
                 'rrule': None,
                 'url': None,
                 'title': None}
    # Matches e.g. "2 weeks" -> groups (interval, unit).
    repeat_regex = re.compile(r'^(\d+)\s+(minute|hour|day|week|month|year)s?$')
    # Only imgur album/gallery URLs are accepted as banner sources.
    url_regex = re.compile(r'^https?:\/\/imgur\.com\/(a|gallery)\/\w+\/?$')
    # Maps the textual repeat unit to the dateutil rrule frequency constant.
    freq_dict = {'minute': rrule.MINUTELY,
                 'hour': rrule.HOURLY,
                 'day': rrule.DAILY,
                 'week': rrule.WEEKLY,
                 'month': rrule.MONTHLY,
                 'year': rrule.YEARLY,
                 }

    def __init__(self, values, default=None):
        """Build an event from a YAML section dict.

        Raises ValueError with a user-presentable message when `first`,
        `repeat`/`rrule` or the `title` placeholders cannot be parsed.
        """
        values = lowercase_keys_recursively(values)
        # anything not defined in the "values" dict will be defaulted
        init = self._defaults.copy()
        init.update(values)
        # convert the dict to attributes
        self.__dict__.update(init)
        try:
            self.first = parser.parse(self.first)#, default=default)
            # Naive timestamps from the wiki are treated as UTC.
            if not self.first.tzinfo:
                self.first = self.first.replace(tzinfo=tz.tzutc())
        except Exception as e:
            raise ValueError('Error parsing date from `first`.')
        try:
            if self.repeat:
                # "N unit" shorthand takes precedence over a raw rrule.
                match = self.repeat_regex.match(self.repeat)
                interval = int(match.group(1))
                if interval == 0:
                    raise ValueError('Invalid repeat interval.')
                self.rrule = rrule.rrule(self.freq_dict[match.group(2)],
                                         interval=interval,
                                         dtstart=self.first)
            elif self.rrule:
                self.rrule = rrule.rrulestr(self.rrule, dtstart=self.first)
        except Exception as e:
            raise ValueError('Error parsing repeat interval.')
        try:
            if self.title:
                self.title = self.replace_placeholders(self.title)
        except Exception as e:
            raise ValueError('Error in title')

    def is_due(self, start_time, end_time):
        """Return (due, delta, title) for the window [start_time, end_time].

        `delta` is how far past its most recent occurrence start_time is;
        for a non-recurring event the second branch returns the (negative)
        window length instead -- main() only compares deltas of due events.
        """
        if self.rrule and self.rrule.before(start_time, inc=True):
            print "Due now? %s: %s" %(bool(self.rrule.between(start_time, end_time, inc=True)), self.title)
            print 'next recurrence', self.rrule.after(start_time, inc=True)
            return bool(self.rrule.between(start_time, end_time, inc=True)), start_time - self.rrule.before(start_time, inc=True), self.title
        else:
            print "%s: %s - %s" %("Not started or ended", self.title, self.first)
            return start_time <= self.first <= end_time, start_time - end_time, self.title

##    def is_album(self, user, COUNT, LIMIT):
##        valid_images = 0
##        client = ImgurClient(cfg_file.get('imgur', 'client_id'), cfg_file.get('imgur', 'client_secret'))
##        album_id = get_album_id(self.url)
##        album = client.get_album(album_id)
##        if COUNT < LIMIT:
##            print('Not enough images!')
##            return False
##        for image in album.images:
##            if image['size'] > 512000:
##                valid_images -= 1
##        if (COUNT+valid_images) < LIMIT:
##            return False
##        return True
##
    def execute(self, subreddit, BANNER, LIMIT):
        """Apply the event: upload up to LIMIT album images as CSS
        sprites named BANNER1..BANNERn and rewrite the sidebar section
        between the configured start/end delimiters.

        Sends an error PM to the owner (and returns early) when the
        album has too few usable images (> 500 kB files are skipped).
        """
        global r
        client = ImgurClient(cfg_file.get('imgur', 'client_id'), cfg_file.get('imgur', 'client_secret'))
        album_id = get_album_id(self.url)
        album = client.get_album(album_id)
        album_title = self.title
        album = album.images
        COUNT = len(album)
        if COUNT < LIMIT:
            print('Not enough images!')
            send_error_message(cfg_file.get('reddit', 'owner_username'), subreddit.display_name, 'Not enough '
                ' images in album ["{0}"]({1})'.format(album_title, self.url))
            return
        # Pick x random ones if greater than limit
        # NOTE(review): sampling COUNT items from a COUNT-sized list is a
        # full shuffle, which keeps spares available in case some images
        # are oversized -- confirm that is intended vs. sample(album, LIMIT).
        if COUNT > LIMIT:
            album = random.sample(album, COUNT)
        banner_number = 0
        sidebar_format = '* [{title}]({link} "{desc}")'
        sidebar_lines = []
        bigpic = []    # markdown lines describing skipped oversized images
        for image in album:
            # Reddit rejects stylesheet images over 500 kB; record & skip.
            if image['size'] > 512000:
                print ('too big: %s' %(image['link']))
                title = '{0} - ({1} kB) - {2}px x {3}px'.format(image['link'], float(image['size'])/1000, image['width'], image['height'])
                bigpic.append(sidebar_format.format(title=title, link=image['link'], desc=image['description']))
                continue
            banner_number += 1
            url = image['link']
            local_name = localize_name(album_id, url)
            download_image(url, local_name)
            title = image['title'] if image['title'] else 'Untitled'
            description = image['description'] if image['description'] else ' '
            # '#s' is the spoiler-style link the subreddit CSS hooks on.
            line = sidebar_format.format(title=title, link='#s', desc=description)
            css_name = BANNER + '%d' % banner_number
            print('%s: adding %s to stylesheet...' % (subreddit, css_name))
            try:
                r.upload_image(subreddit, local_name, css_name)
            except Exception as e:
                print (e)
                return
            sidebar_lines.append(line)
            if banner_number >= LIMIT:
                break
        if banner_number < LIMIT:
            print ('Not enough valid images')
            send_error_message(cfg_file.get('reddit', 'owner_username'), subreddit.display_name, 'Not enough valid'
                ' images in album ["{0}"]({1}); check that the following image sizes are less than 500kB. '
                'Images ideally should be greater than 300px wide and 1:1 or greater aspect ratio: \n\n{2}'.format(album_title, self.url, '\n'.join(bigpic)))
            return
        # Splice the generated list into the sidebar between the delimiters.
        bar = '\n'.join(sidebar_lines)
        bar = '##### ' + album_title + '\n' + bar + '\n\n'
        r.config.decode_html_entities = True
        current_sidebar = subreddit.get_settings()['description']
        current_sidebar = HTMLParser.HTMLParser().unescape(current_sidebar)
        replace_pattern = re.compile('%s.*?%s' % (re.escape(cfg_file.get('reddit', 'start_delimiter')), re.escape(cfg_file.get('reddit', 'end_delimiter'))), re.IGNORECASE|re.DOTALL|re.UNICODE)
        new_sidebar = re.sub(replace_pattern,
                             '%s\\n\\n%s\\n%s' % (cfg_file.get('reddit', 'start_delimiter'), bar, cfg_file.get('reddit', 'end_delimiter')),
                             current_sidebar)
        r.update_settings(subreddit, description=new_sidebar)
        print ('%s sidebar updated!' %subreddit)
        # Re-setting the stylesheet makes reddit pick up the new images.
        subreddit.set_stylesheet(subreddit.get_stylesheet()['stylesheet'])
        print ('%s stylesheet set!' %subreddit)
        if bigpic:
            send_error_message(cfg_file.get('reddit', 'owner_username'), subreddit.display_name, 'The following '
                ' images in album ["{0}"]({1}) were not valid and were skipped; check that the following image sizes are less than 500kB. '
                'Images ideally should be greater than 300px wide and 1:1 or greater aspect ratio: \n\n{2}'.format(album_title, self.url, '\n'.join(bigpic)))

    # NOTE(review): unused stub and missing `self`; presumably a leftover
    # placeholder -- confirm before removing.
    def error_album (error):
        pass

    def replace_placeholders(self, string):
        """Expand every {{date[+/-N] fmt}} placeholder in *string* with
        the current time (offset by N days) formatted via strftime."""
        date_regex = re.compile(r'\{\{date([+-]\d+)?\s+([^}]+?)\}\}')
        now = datetime.now(self.first.tzinfo)
        match = date_regex.search(string)
        while match:
            date = now
            if match.group(1):
                offset = int(match.group(1))
                date += timedelta(days=offset)
            format_str = match.group(2)
            # count=1 so each placeholder is substituted independently.
            string = date_regex.sub(date.strftime(format_str), string, count=1)
            match = date_regex.search(string)
        return string
def download_image(url, local_name):
    """Download *url* to *local_name*, creating parent directories.

    Acts as a cache: if the file already exists nothing is fetched.
    Raises requests.HTTPError on a 4xx/5xx response so an error page is
    never silently saved (and later uploaded) as if it were the image.
    """
    if os.path.exists(local_name):
        return
    location = os.path.split(local_name)[0]
    # Guard against a bare filename: os.makedirs('') would raise OSError.
    if location and not os.path.exists(location):
        os.makedirs(location)
    page = requests.get(url)
    # Previously any response body -- including imgur error pages -- was
    # written to disk; fail loudly instead so the caller's error handling
    # (main() logs and rolls back) kicks in.
    page.raise_for_status()
    with open(local_name, 'wb') as f:
        f.write(page.content)
def localize_name(album_id, image_link):
    """Map an imgur image URL onto its local cache path images/<album>/<file>."""
    filename = image_link.rsplit('/', 1)[-1]
    return os.path.join('images', album_id, filename)
def get_album_id(album_url):
    """Extract the album hash from an imgur /a/ or /gallery/ URL."""
    normalized = album_url.replace('/gallery/', '/a/')
    tail = normalized.split('/a/')[-1]
    return tail.split('/')[0]
def update_from_wiki(subreddit, requester):
    """Reload *subreddit*'s schedule from its wiki page into the database.

    Validates the page as YAML twice (first a pure syntax pass, then a
    semantic pass via check_event_valid/ScheduledEvent).  On any failure
    an explanatory PM is sent to *requester* and False is returned;
    returns True after the row is committed.
    """
    print "Updating events from the %s wiki." %subreddit
    global r
    username = cfg_file.get('reddit', 'username')
    try:
        page = subreddit.get_wiki_page(cfg_file.get('reddit', 'wiki_page_name'))
    except Exception:
        send_error_message(requester, subreddit.display_name,
            'The wiki page could not be accessed. Please ensure the page '
            'http://www.reddit.com/r/{0}/wiki/{1} exists and that {2} '
            'has the "wiki" mod permission to be able to access it.'
            .format(subreddit.display_name,
                    cfg_file.get('reddit', 'wiki_page_name'),
                    username))
        return False
    # The API returns HTML-escaped markdown; unescape before parsing.
    html_parser = HTMLParser.HTMLParser()
    page_content = html_parser.unescape(page.content_md)
    # check that all the events are valid yaml
    event_defs = yaml.safe_load_all(page_content)
    event_num = 1
    try:
        # safe_load_all is lazy, so iterating forces each section to parse.
        for event_def in event_defs:
            event_num += 1
    except Exception as e:
        indented = ''
        for line in str(e).split('\n'):
            indented += '    {0}\n'.format(line)
        send_error_message(requester, subreddit.display_name,
            'Error when reading schedule from wiki - '
            'Syntax invalid in section #{0}:\n\n{1}'
            .format(event_num, indented))
        return False
    # reload and actually process the events
    event_defs = yaml.safe_load_all(page_content)
    event_num = 1
    kept_sections = []
    for event_def in event_defs:
        # ignore any non-dict sections (can be used as comments, etc.)
        if not isinstance(event_def, dict):
            continue
        event_def = lowercase_keys_recursively(event_def)
        try:
            check_event_valid(event_def)
            event = ScheduledEvent(event_def)
        except ValueError as e:
            send_error_message(requester, subreddit.display_name,
                'Invalid event in section #{0} - {1}'
                .format(event_num, e))
            return False
        event_num += 1
        kept_sections.append(event_def)
    # Update the subreddit, or add it if necessary
    try:
        db_subreddit = (session.query(Subreddit)
                        .filter(Subreddit.name == subreddit.display_name.lower())
                        .one())
    except NoResultFound:
        db_subreddit = Subreddit()
        db_subreddit.name = subreddit.display_name.lower()
        session.add(db_subreddit)
    db_subreddit.updated = datetime.utcnow()
    db_subreddit.schedule_yaml = page_content
    session.commit()
    logging.info("Update from wiki complete")
##    r.send_message(requester,
##                   '{0} schedule updated'.format(username),
##                   "{0}'s schedule was successfully updated for /r/{1}"
##                   .format(username, subreddit.display_name))
    return True
def lowercase_keys_recursively(subject):
    """Return a copy of *subject* with every key lowercased.

    Recurses into nested dict values; other values (including lists of
    dicts) are kept untouched, matching the original behaviour.
    """
    lowercased = dict()
    # .items() instead of the Python-2-only .iteritems(): behaviour is
    # identical on Python 2 (just builds a list) and this also runs on
    # Python 3, unlike the rest of this Python 2 script.
    for key, val in subject.items():
        if isinstance(val, dict):
            val = lowercase_keys_recursively(val)
        lowercased[key.lower()] = val
    return lowercased
def check_event_valid(event):
    """Checks if an event defined on a wiki page is valid.

    Raises ValueError (with a user-presentable message) on the first
    problem found; returns None when every check passes.
    """
    print "Validating wiki events..."
    validate_keys(event)
    validate_values_not_empty(event)
    # All user-supplied scalar settings must be strings.
    validate_type(event, 'first', basestring)
    validate_type(event, 'repeat', basestring)
    validate_type(event, 'rrule', basestring)
    validate_type(event, 'title', basestring)
    # url must be an imgur album; repeat must look like "<n> <unit>(s)".
    validate_regex(event, 'url', ScheduledEvent.url_regex)
    validate_regex(event, 'repeat', ScheduledEvent.repeat_regex)
def validate_values_not_empty(check):
    """Recursively reject None or empty string/list values in *check*.

    Raises ValueError naming the offending key; non-container scalars
    (numbers, booleans) are accepted as-is.
    """
    for key, val in check.iteritems():
        if isinstance(val, dict):
            validate_values_not_empty(val)
        elif (val is None or
              (isinstance(val, (basestring, list)) and len(val) == 0)):
            raise ValueError('`{0}` set to an empty value'.format(key))
def validate_keys(check):
    """Reject unknown variables and require title/first/url to be set."""
    # Allowed names: the required trio plus everything ScheduledEvent
    # understands as optional.
    valid_keys = {'first', 'rrule', 'title', 'url'}
    valid_keys |= set(ScheduledEvent._defaults.keys())
    for key in check:
        if key not in valid_keys:
            raise ValueError('Invalid variable: `{0}`'.format(key))
    # make sure that all of the required keys are being set
    if any(name not in check for name in ('title', 'first', 'url')):
        raise ValueError('All the required variables were not set.')
def validate_type(check, key, req_type):
    """Ensure check[key], when present, is of *req_type*.

    int is special-cased to also accept int-like strings; a missing key
    is not an error.  Raises ValueError on mismatch.
    """
    if key not in check:
        return
    value = check[key]
    if req_type is int:
        try:
            int(str(value))
        except ValueError:
            raise ValueError('{0} must be an integer'.format(key))
        return
    if not isinstance(value, req_type):
        raise ValueError('{0} must be {1}'.format(key, req_type))
def validate_regex(check, key, pattern):
    """Raise ValueError unless check[key] (when present) matches *pattern*.

    *pattern* may be a string or a pre-compiled regex; a missing key is
    accepted silently.
    """
    if key not in check:
        return
    value = check[key]
    if re.match(pattern, value) is None:
        raise ValueError('Invalid {0}: {1}'.format(key, value))
def send_error_message(user, sr_name, error):
    """Sends an error message to the user if a wiki update failed.

    Uses the global reddit session; *error* is markdown and is embedded
    verbatim below a link back to the subreddit's wiki config page.
    """
    global r
    r.send_message(user,
                   'Error processing wiki in /r/{0}'.format(sr_name),
                   '**Error updating from [wiki configuration in /r/{0}]'
                   '(http://www.reddit.com/r/{0}/wiki/{1})**:\n\n---\n\n{2}'
                   .format(sr_name,
                           cfg_file.get('reddit', 'wiki_page_name'),
                           error))
def process_messages():
    """Scan the bot's inbox for 'schedule' commands and run wiki updates.

    Only messages newer than [reddit] last_message are considered; that
    marker is advanced (in the config file) even when processing raises,
    so a bad message cannot wedge the bot in a retry loop.  A requester
    must be the configured owner or a moderator of the named subreddit.
    """
    global r
    stop_time = int(cfg_file.get('reddit', 'last_message'))
    owner_username = cfg_file.get('reddit', 'owner_username')
    new_last_message = None
    # (subreddit_lowercase, requester_name) pairs to update, deduplicated.
    update_srs = set()
    invite_srs = set()  # NOTE(review): never populated; apparently unused.
    logging.debug('Reading messages and commands...')
    try:
        for message in r.get_inbox():
            # Inbox is newest-first; stop at the previously-seen marker.
            if int(message.created_utc) <= stop_time:
                break
            if message.was_comment:
                continue
            # Remember the newest message time on the first iteration.
            if not new_last_message:
                new_last_message = int(message.created_utc)
            if message.body.strip().lower() == 'schedule':
                # handle if they put in something like '/r/' in the subject
                if '/' in message.subject:
                    sr_name = message.subject[message.subject.rindex('/')+1:]
                else:
                    sr_name = message.subject
                if (sr_name.lower(), message.author.name) in update_srs:
                    continue
                try:
                    subreddit = r.get_subreddit(sr_name)
                    if (message.author.name == owner_username or
                            message.author in subreddit.get_moderators()):
                        update_srs.add((sr_name.lower(), message.author.name))
                    else:
                        send_error_message(message.author, sr_name,
                            'You do not moderate /r/{0}'.format(sr_name))
                except HTTPError as e:
                    send_error_message(message.author, sr_name,
                        'Unable to access /r/{0}'.format(sr_name))
        # do requested updates from wiki pages
        updated_srs = []
        for subreddit, sender in update_srs:
            if update_from_wiki(r.get_subreddit(subreddit),
                                r.get_redditor(sender)):
                updated_srs.append(subreddit)
                logging.info('Updated from wiki in /r/{0}'.format(subreddit))
            else:
                logging.info('Error updating from wiki in /r/{0}'
                             .format(subreddit))
    except Exception as e:
        logging.error('ERROR: {0}'.format(e))
        raise
    finally:
        # update cfg with new last_message value
        if new_last_message:
            cfg_file.set('reddit', 'last_message', str(new_last_message))
            cfg_file.write(open(path_to_cfg, 'w'))
def main():
    """Single bot pass: log in, process commands, run due events.

    The window checked for due events is (last_run, now]; last_run is
    read from the config at start and rewritten at the end, so a crash
    mid-run replays the same window on the next invocation.
    """
    global r
    global client
    logging.config.fileConfig(path_to_cfg)
    start_timestamp = int(time())
    start_time = datetime.utcfromtimestamp(start_timestamp)
    start_time = start_time.replace(tzinfo=tz.tzutc())
    print "Start time %s" %start_time
    last_run = int(cfg_file.get('reddit', 'last_run'))
    last_run = datetime.utcfromtimestamp(last_run)
    last_run = last_run.replace(tzinfo=tz.tzutc())
##    cfg_file.set('reddit', 'last_run', str(start_timestamp))
##    cfg_file.write(open(path_to_cfg, 'w'))
    # Retry login forever; praw/network hiccups are logged and retried.
    while True:
        try:
            r = praw.Reddit(user_agent=cfg_file.get('reddit', 'user_agent'))
            logging.debug('Logging in as {0}'
                          .format(cfg_file.get('reddit', 'username')))
            r.login(cfg_file.get('reddit', 'username'),
                    cfg_file.get('reddit', 'password'), disable_warning=True)
            break
        except Exception as e:
            logging.error('ERROR: {0}'.format(e))
    # check for update messages
    logging.info("checking for update messages")
    try:
        process_messages()
    except KeyboardInterrupt:
        raise
    except Exception as e:
        logging.error('ERROR: {0}'.format(e))
        session.rollback()
    subreddits = (session.query(Subreddit)
                  .filter(Subreddit.enabled == 1)
                  .all())
    for sr in subreddits:
        LIMIT = sr.banner_limit
        BANNER = sr.banner_name
        # Re-parse the stored YAML; non-dict sections act as comments.
        schedule = [ScheduledEvent(d, sr.updated)
                    for d in yaml.safe_load_all(sr.schedule_yaml)
                    if isinstance(d, dict)]
        title = ""
        event_due = ""
        # Pick the due event with the smallest "overdue" delta, i.e. the
        # one that most recently became due.
        past_due = timedelta(days=999999999)
        for event in schedule:
            mc = event.is_due(last_run, start_time)
            # NOTE(review): `mc[1]` falsy also when the delta is exactly
            # zero, which skips an event occurring precisely at last_run
            # -- confirm that edge case is acceptable.
            if mc[0] and mc[1]:
                if mc[1] < past_due:
                    past_due = mc[1]
                    event_due = event
                    title = mc[2]
        if event_due:
            try:
                print ('executing', title, event_due)
                event_due.execute(r.get_subreddit(sr.name), BANNER, LIMIT)
            except KeyboardInterrupt:
                raise
            except Exception as e:
                logging.error('ERROR in /r/{0}: {1}. Rolling back'.format(sr.name, e))
                session.rollback()
    # Persist the new window start only after the whole pass completes.
    cfg_file.set('reddit', 'last_run', str(start_timestamp))
    cfg_file.write(open(path_to_cfg, 'w'))
# Run one bot pass when executed directly as a script.
if __name__ == '__main__':
    main()
| mit |
zarafagroupware/zarafa-zsm | webservice/libzsm/rest_client/logutils.py | 3 | 4601 | # Copyright 2012 - 2013 Zarafa B.V.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation with the following additional
# term according to sec. 7:
#
# According to sec. 7 of the GNU Affero General Public License, version
# 3, the terms of the AGPL are supplemented with the following terms:
#
# "Zarafa" is a registered trademark of Zarafa B.V. The licensing of
# the Program under the AGPL does not imply a trademark license.
# Therefore any rights, title and interest in our trademarks remain
# entirely with us.
#
# However, if you propagate an unmodified version of the Program you are
# allowed to use the term "Zarafa" to indicate that you distribute the
# Program. Furthermore you may use our trademarks where it is necessary
# to indicate the intended purpose of a product or service provided you
# use it in accordance with honest practices in industrial or commercial
# matters. If you want to propagate modified versions of the Program
# under the name "Zarafa" or "Zarafa Server", you may only do so if you
# have a written permission by Zarafa B.V. (to acquire a permission
# please contact Zarafa at trademark@zarafa.com).
#
# The interactive user interface of the software displays an attribution
# notice containing the term "Zarafa" and/or the logo of Zarafa.
# Interactive user interfaces of unmodified and modified versions must
# display Appropriate Legal Notices according to sec. 5 of the GNU
# Affero General Public License, version 3, when you propagate
# unmodified or modified versions of the Program. In accordance with
# sec. 7 b) of the GNU Affero General Public License, version 3, these
# Appropriate Legal Notices must retain the logo of Zarafa or display
# the words "Initial Development by Zarafa" if the display of the logo
# is not reasonably feasible for technical reasons. The use of the logo
# of Zarafa in Legal Notices is allowed for unmodified and modified
# versions of the software.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import re
import os
# Log-line format applied to every handler get_logger() installs.
formatter = logging.Formatter('%(asctime)s %(name)-8s %(levelname)-6s %(message)s')

# Module-level singleton; created lazily by the first get_logger() call.
_logger = None

# Logger-name fragments whose records should not propagate to the root
# logger (they are too noisy at default levels).
CHATTY_LOGGERS = [
    'apirequest',
    'requests',
]
# Backport shim: logging.NullHandler only exists from Python 2.7 onward.
class NullHandler(logging.Handler):
    """A handler that silently discards every record it is given."""

    def createLock(self):
        """Skip lock creation entirely -- nothing is ever emitted."""
        self.lock = None

    def emit(self, record):
        """Discard *record*."""
        pass

    def handle(self, record):
        """Discard *record* without filtering or emitting."""
        pass
def get_logger(name, level=logging.WARN):
    """Return the module-wide singleton logger, creating it on first use.

    *name* may be a file path; only its basename without extension is
    used as the logger name.  On subsequent calls the existing logger is
    returned and *name*/*level* are ignored.  Every call also disables
    propagation for known chatty nested loggers.
    """
    global _logger
    # Treat anything containing '.' or '/' as a path-like name.
    if re.search('[./]', name):
        name = os.path.basename(name)
        name, _ = os.path.splitext(name)
    if not _logger:
        _logger = logging.getLogger(name)
        _logger.setLevel(level)
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(formatter)
        _logger.addHandler(stream_handler)
    # remove propagation for chatty nested loggers
    for pkgname, logger in _logger.manager.loggerDict.items():
        for pkgpat in CHATTY_LOGGERS:
            if pkgpat in pkgname:
                # fix requests.packages.urllib3.connectionpool complaining
                # about missing handler
                # (loggerDict may also hold PlaceHolder objects without
                # handlers/addHandler, hence the getattr/hasattr guards)
                if not getattr(logger, 'handlers', None):
                    if hasattr(logger, 'addHandler'):
                        logger.addHandler(NullHandler())
                logger.propagate = False
    return _logger
def format_request(request):
    """Render *request* as 'METHOD URL' followed by sorted headers and body."""
    parts = [u'{0} {1}'.format(request.method, request.url)]
    if request.headers:
        for header in sorted(request.headers):
            parts.append(u'{0}: {1}'.format(header.title(),
                                            request.headers[header]))
        # Trailing empty part yields the newline after the header block.
        parts.append(u'')
    if request.body:
        parts.append(u'{0}'.format(request.body.decode(request.encoding)))
    return u'\n'.join(parts)
def format_response(response):
    """Render *response* as 'HTTP <code> <reason>', sorted headers, body."""
    # The reason phrase only lives on urllib3's wrapped original response;
    # fall back to '' when either link in the chain is absent.
    original = getattr(response.raw, '_original_response', None)
    reason = getattr(original, 'reason', '')
    parts = [u'HTTP {0} {1}'.format(response.status_code, reason)]
    if response.headers:
        for header in sorted(response.headers):
            parts.append(u'{0}: {1}'.format(header.title(),
                                            response.headers[header]))
        # Trailing empty part yields the newline after the header block.
        parts.append(u'')
    if response.text:
        parts.append(u'{0}'.format(response.text))
    return u'\n'.join(parts)
| agpl-3.0 |
MarcosCommunity/odoo | addons/account_check_writing/account.py | 379 | 2032 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv,fields
class account_journal(osv.osv):
    """Extend account.journal with cheque-printing configuration flags."""
    _inherit = "account.journal"

    _columns = {
        'allow_check_writing': fields.boolean('Allow Check writing', help='Check this if the journal is to be used for writing checks.'),
        'use_preprint_check': fields.boolean('Use Preprinted Check', help='Check if you use a preformated sheet for check'),
    }
class res_company(osv.osv):
    """Extend res.company with the cheque's position on the printed page."""
    _inherit = "res.company"

    _columns = {
        'check_layout': fields.selection([
            ('top', 'Check on Top'),
            ('middle', 'Check in middle'),
            ('bottom', 'Check on bottom'),
            ],"Check Layout",
            help="Check on top is compatible with Quicken, QuickBooks and Microsoft Money. Check in middle is compatible with Peachtree, ACCPAC and DacEasy. Check on bottom is compatible with Peachtree, ACCPAC and DacEasy only" ),
    }
    # Default cheque position for new companies.
    _defaults = {
        'check_layout' : lambda *a: 'top',
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sudheesh001/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/test_dropbox.py | 92 | 1042 | import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class DropboxOAuth1Test(OAuth1Test):
    """OAuth1 login-flow test for the Dropbox backend using canned
    token and account-info HTTP responses."""
    backend_path = 'social.backends.dropbox.DropboxOAuth'
    user_data_url = 'https://api.dropbox.com/1/account/info'
    # The backend derives the username from the numeric Dropbox uid.
    expected_username = '10101010'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # OAuth1 request-token step responds with form-encoded data.
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    # Canned /account/info payload mirroring Dropbox API v1's schema.
    user_data_body = json.dumps({
        'referral_link': 'https://www.dropbox.com/referrals/foobar',
        'display_name': 'Foo Bar',
        'uid': 10101010,
        'country': 'US',
        'quota_info': {
            'shared': 138573,
            'quota': 2952790016,
            'normal': 157327
        },
        'email': 'foo@bar.com'
    })

    def test_login(self):
        """Full login flow succeeds against the mocked endpoints."""
        self.do_login()

    def test_partial_pipeline(self):
        """Login resumes correctly from a partially-run pipeline."""
        self.do_partial_pipeline()
| agpl-3.0 |
aferr/TemporalPartitioningMemCtl | src/arch/x86/isa/insts/romutil.py | 65 | 7401 | # Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# Microcode template for the long-mode interrupt/trap entry routine.  It
# is instantiated three times below (hardware interrupt, software
# interrupt, interrupt with error code) via %-substitution with the keys:
#   startLabel    - ROM label for this entry flavour
#   gateCheckType - chks check type for the IDT gate descriptor
#   errorCodeSize - bytes the pushed error code occupies (0 or 8)
#   errorCodeCode - microcode that stores the error code, or ""
# The template body is assembler source consumed by gem5's microcode
# assembler; its '#' comments belong to that assembler, not Python.
intCodeTemplate = '''
def rom
{
# This vectors the CPU into an interrupt handler in long mode.
# On entry, t1 is set to the vector of the interrupt and t7 is the current
# ip. We need that because rdip returns the next ip.
extern %(startLabel)s:
#
# Get the 64 bit interrupt or trap gate descriptor from the IDT
#
# Load the gate descriptor from the IDT
slli t4, t1, 4, dataSize=8
ld t2, idtr, [1, t0, t4], 8, dataSize=8, addressSize=8, atCPL0=True
ld t4, idtr, [1, t0, t4], dataSize=8, addressSize=8, atCPL0=True
# Make sure the descriptor is a legal gate.
chks t1, t4, %(gateCheckType)s
#
# Get the target CS descriptor using the selector in the gate
# descriptor.
#
srli t10, t4, 16, dataSize=8
andi t5, t10, 0xF8, dataSize=8
andi t0, t10, 0x4, flags=(EZF,), dataSize=2
br rom_local_label("%(startLabel)s_globalDescriptor"), flags=(CEZF,)
ld t3, tsl, [1, t0, t5], dataSize=8, addressSize=8, atCPL0=True
br rom_local_label("%(startLabel)s_processDescriptor")
%(startLabel)s_globalDescriptor:
ld t3, tsg, [1, t0, t5], dataSize=8, addressSize=8, atCPL0=True
%(startLabel)s_processDescriptor:
chks t10, t3, IntCSCheck, dataSize=8
wrdl hs, t3, t10, dataSize=8
# Stick the target offset in t9.
wrdh t9, t4, t2, dataSize=8
#
# Figure out where the stack should be
#
# Record what we might set the stack selector to.
rdsel t11, ss
# Check if we're changing privelege level. At this point we can assume
# we're going to a DPL that's less than or equal to the CPL.
rdattr t10, hs, dataSize=8
andi t10, t10, 3, dataSize=8
rdattr t5, cs, dataSize=8
andi t5, t5, 0x3, dataSize=8
sub t0, t5, t10, flags=(EZF,), dataSize=8
# We're going to change priviledge, so zero out the stack selector. We
# need to let the IST have priority so we don't branch yet.
mov t11, t0, t0, flags=(nCEZF,)
# Check the IST field of the gate descriptor
srli t12, t4, 32, dataSize=8
andi t12, t12, 0x7, dataSize=8
subi t0, t12, 1, flags=(ECF,), dataSize=8
br rom_local_label("%(startLabel)s_istStackSwitch"), flags=(nCECF,)
br rom_local_label("%(startLabel)s_cplStackSwitch"), flags=(nCEZF,)
# If we're here, it's because the stack isn't being switched.
# Set t6 to the new aligned rsp.
mov t6, t6, rsp, dataSize=8
br rom_local_label("%(startLabel)s_stackSwitched")
%(startLabel)s_istStackSwitch:
ld t6, tr, [8, t12, t0], 0x1c, dataSize=8, addressSize=8, atCPL0=True
br rom_local_label("%(startLabel)s_stackSwitched")
%(startLabel)s_cplStackSwitch:
# Get the new rsp from the TSS
ld t6, tr, [8, t10, t0], 4, dataSize=8, addressSize=8, atCPL0=True
%(startLabel)s_stackSwitched:
andi t6, t6, 0xF0, dataSize=1
subi t6, t6, 40 + %(errorCodeSize)d, dataSize=8
##
## Point of no return.
## We're now going to irrevocably modify visible state.
## Anything bad that's going to happen should have happened by now or will
## happen right now.
##
wrip t0, t9, dataSize=8
#
# Set up the target code segment. Do this now so we have the right
# permissions when setting up the stack frame.
#
srli t5, t4, 16, dataSize=8
andi t5, t5, 0xFF, dataSize=8
wrdl cs, t3, t5, dataSize=8
# Tuck away the old CS for use below
limm t10, 0, dataSize=8
rdsel t10, cs, dataSize=2
wrsel cs, t5, dataSize=2
# Check that we can access everything we need to on the stack
ldst t0, hs, [1, t0, t6], dataSize=8, addressSize=8
ldst t0, hs, [1, t0, t6], \
32 + %(errorCodeSize)d, dataSize=8, addressSize=8
#
# Build up the interrupt stack frame
#
# Write out the contents of memory
%(errorCodeCode)s
st t7, hs, [1, t0, t6], %(errorCodeSize)d, dataSize=8, addressSize=8
st t10, hs, [1, t0, t6], 8 + %(errorCodeSize)d, dataSize=8, addressSize=8
rflags t10, dataSize=8
st t10, hs, [1, t0, t6], 16 + %(errorCodeSize)d, dataSize=8, addressSize=8
st rsp, hs, [1, t0, t6], 24 + %(errorCodeSize)d, dataSize=8, addressSize=8
rdsel t5, ss, dataSize=2
st t5, hs, [1, t0, t6], 32 + %(errorCodeSize)d, dataSize=8, addressSize=8
# Set the stack segment
mov rsp, rsp, t6, dataSize=8
wrsel ss, t11, dataSize=2
#
# Adjust rflags which is still in t10 from above
#
# Set IF to the lowest bit of the original gate type.
# The type field of the original gate starts at bit 40.
# Set the TF, NT, and RF bits. We'll flip them at the end.
limm t6, (1 << 8) | (1 << 14) | (1 << 16), dataSize=8
or t10, t10, t6, dataSize=8
srli t5, t4, 40, dataSize=8
srli t7, t10, 9, dataSize=8
xor t5, t7, t5, dataSize=8
andi t5, t5, 1, dataSize=8
slli t5, t5, 9, dataSize=8
or t6, t5, t6, dataSize=8
# Put the results into rflags
wrflags t6, t10
eret
};
'''
microcode = \
intCodeTemplate % {\
"startLabel" : "longModeInterrupt",
"gateCheckType" : "IntGateCheck",
"errorCodeSize" : 0,
"errorCodeCode" : ""
} + \
intCodeTemplate % {\
"startLabel" : "longModeSoftInterrupt",
"gateCheckType" : "SoftIntGateCheck",
"errorCodeSize" : 0,
"errorCodeCode" : ""
} + \
intCodeTemplate % {\
"startLabel" : "longModeInterruptWithError",
"gateCheckType" : "IntGateCheck",
"errorCodeSize" : 8,
"errorCodeCode" : '''
st t15, hs, [1, t0, t6], dataSize=8, addressSize=8
'''
} + \
'''
def rom
{
# This vectors the CPU into an interrupt handler in legacy mode.
extern legacyModeInterrupt:
panic "Legacy mode interrupts not implemented (in microcode)"
eret
};
def rom
{
extern initIntHalt:
rflags t1
limm t2, "~IFBit"
and t1, t1, t2
wrflags t1, t0
halt
eret
};
'''
| bsd-3-clause |
mysociety/yournextrepresentative | candidates/models/fields.py | 3 | 5024 | from __future__ import unicode_literals
from django.db import models
from popolo.models import Person
from compat import python_2_unicode_compatible
def get_complex_popolo_fields():
    """Return a mapping of field name to ComplexField object

    This returns a dict mapping the name of the field to the
    ComplexField object which defines where the value is stored in the
    django-popolo models
    """
    fields_by_name = {}
    for field in ComplexPopoloField.objects.all():
        fields_by_name[field.name] = field
    return fields_by_name
@python_2_unicode_compatible
class SimplePopoloField(models.Model):
    """Configuration row exposing one simple django-popolo Person field.

    Each instance says which Person attribute should appear on candidate
    edit forms, with what label, widget type and required-ness.
    """

    class Meta:
        ordering = ('order',)

    # The django-popolo Person attributes that may be exposed.
    VALID_FIELDS = (
        ('name', 'Name'),
        ('family_name', 'Family Name'),
        ('given_name', 'Given Name'),
        ('additional_name', 'Additional Name'),
        ('honorific_prefix', 'Honorific Prefix'),
        ('honorific_suffix', 'Honorific Suffix'),
        ('patronymic_name', 'Patronymic Name'),
        ('sort_name', 'Sort Name'),
        ('email', 'Email'),
        ('gender', 'Gender'),
        ('birth_date', 'Birth Date'),
        ('death_date', 'Death Date'),
        ('summary', 'Summary'),
        ('biography', 'Biography'),
        ('national_identity', 'National Identity'),
    )

    # Which Person attribute this row configures.
    name = models.CharField(
        choices=VALID_FIELDS,
        max_length=256
    )
    # User-facing label shown on the edit form.
    label = models.CharField(max_length=256)
    required = models.BooleanField(default=False)
    # HTML widget to render for the field.
    info_type_key = models.CharField(
        choices=(
            ('text', 'Text Field'),
            ('email', 'Email Field'),
        ),
        max_length=256
    )
    # Display position; drives the default ordering above.
    order = models.IntegerField(blank=True)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class ComplexPopoloField(models.Model):
    """
    This model stores the name of the underlying relation, some details about
    how it should be displayed ( label and field type ) and the details of
    how to store the information in the generic relation.

    The info_type_* properties are used to describe the key used to pull the
    field value out of the underlying generic relation. _key being the name
    of the field to store the value in info_type.

    info_value_key is the name of the field in the underlying relation in
    which to store the value of the complex field.

    To get the value for a person you fetch the item from the generic relation
    named in popolo_array where info_type_key matches info_type.
    """

    class Meta:
        ordering = ('order',)

    # The django-popolo related managers a value may be stored in.
    VALID_ARRAYS = (
        ('links', 'Links'),
        ('contact_details', 'Contact Details'),
        ('identifier', 'Identifier'),
    )

    name = models.CharField(
        max_length=256,
    )
    label = models.CharField(
        max_length=256,
        help_text="User facing description of the information",
    )
    popolo_array = models.CharField(
        choices=VALID_ARRAYS,
        max_length=256,
        help_text="Name of the Popolo related type",
    )
    field_type = models.CharField(
        choices=(
            ('text', 'Text Field'),
            ('url', 'URL Field'),
            ('email', 'Email Field'),
        ),
        max_length=256,
        help_text="Type of HTML field the user will see",
    )
    info_type_key = models.CharField(
        max_length=100,
        help_text="Name of the field in the array that stores the type ('note' for links, 'contact_type' for contacts, 'scheme' for identifiers)"
    )
    info_type = models.CharField(
        max_length=100,
        help_text="Value to put in the info_type_key e.g. twitter",
    )
    old_info_type = models.CharField(
        max_length=100,
        blank=True,
        help_text="Used for supporting info_types that have been renamed. As such it's rarely used."
    )
    info_value_key = models.CharField(
        max_length=100,
        help_text="Name of the field in the array that stores the value, e.g 'url' for links, 'value' for contact_type, 'identifier' for identifiers",
    )
    # Display position; drives the default ordering above.
    order = models.IntegerField(blank=True, default=0)

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class ExtraField(models.Model):
    """An admin-defined extra field that can be attached to a Person.

    Values for a given person are stored in PersonExtraFieldValue; this
    model only declares the field's key, widget type, label and order.
    """
    class Meta:
        ordering = ('order',)
    # Widget types the templates know how to render.
    LINE = 'line'
    LONGER_TEXT = 'longer-text'
    URL = 'url'
    YESNO = 'yesno'
    FIELD_TYPES = (
        (LINE, 'A single line of text'),
        (LONGER_TEXT, 'One or more paragraphs of text'),
        (URL, 'A URL'),
        (YESNO, 'A Yes/No/Don\'t know dropdown')
    )
    # Machine-readable identifier of the field.
    key = models.CharField(max_length=256)
    type = models.CharField(
        max_length=64,
        choices=FIELD_TYPES,
    )
    # Human-readable label shown next to the input widget.
    label = models.CharField(max_length=1024)
    # Display position; see Meta.ordering above.
    order = models.IntegerField(blank=True, default=0)
    def __str__(self):
        return self.key
class PersonExtraFieldValue(models.Model):
    """Stores the value of one ExtraField for one Person."""
    class Meta:
        # At most one value per (person, field) pair.  Written as an
        # explicit tuple-of-tuples: the previous `(('person', 'field'))`
        # had redundant inner parentheses and only worked because Django
        # accepts a single tuple of field names as a shorthand.
        unique_together = (('person', 'field'),)
    person = models.ForeignKey(Person, related_name='extra_field_values')
    field = models.ForeignKey(ExtraField)
    # Free-form value; blank allowed so empty submissions validate.
    value = models.TextField(blank=True)
| agpl-3.0 |
mrshelly/openerp71313 | openerp/addons/account/report/account_journal.py | 7 | 9679 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from common_report_header import common_report_header
from openerp.report import report_sxw
class journal_print(report_sxw.rml_parse, common_report_header):
    """RML parser behind the journal reports.

    Renders the account move lines of the selected journals/periods and
    exposes debit / credit / VAT aggregation helpers to the RML template.
    """

    def __init__(self, cr, uid, name, context=None):
        if context is None:
            context = {}
        super(journal_print, self).__init__(cr, uid, name, context=context)
        self.period_ids = []
        self.last_move_id = False
        self.journal_ids = []
        self.sort_selection = 'am.name'
        # Helpers made available to the RML template.
        self.localcontext.update({
            'time': time,
            'lines': self.lines,
            'sum_debit': self._sum_debit,
            'sum_credit': self._sum_credit,
            'get_start_period': self.get_start_period,
            'get_end_period': self.get_end_period,
            'get_account': self._get_account,
            'get_filter': self._get_filter,
            'get_start_date': self._get_start_date,
            'get_end_date': self._get_end_date,
            'get_fiscalyear': self._get_fiscalyear,
            'display_currency': self._display_currency,
            'get_sortby': self._get_sortby,
            'get_target_move': self._get_target_move,
            'check_last_move_id': self.check_last_move_id,
            'set_last_move_id': self.set_last_move_id,
            'tax_codes': self.tax_codes,
            'sum_vat': self._sum_vat,
        })

    def set_context(self, objects, data, ids, report_type=None):
        """Initialise journal/period selection and the extra SQL filter,
        either from the wizard form (menu entry) or directly from the
        selected account.journal.period records."""
        obj_move = self.pool.get('account.move.line')
        new_ids = ids
        self.query_get_clause = ''
        self.target_move = data['form'].get('target_move', 'all')
        if (data['model'] == 'ir.ui.menu'):
            self.period_ids = tuple(data['form']['periods'])
            self.journal_ids = tuple(data['form']['journal_ids'])
            new_ids = data['form'].get('active_ids', [])
            self.query_get_clause = 'AND '
            self.query_get_clause += obj_move._query_get(self.cr, self.uid, obj='l', context=data['form'].get('used_context', {}))
            self.sort_selection = data['form'].get('sort_selection', 'date')
            objects = self.pool.get('account.journal.period').browse(self.cr, self.uid, new_ids)
        elif new_ids:
            # Direct access from the account.journal.period object: derive
            # journal_ids and period_ids from the selected records.
            self.cr.execute('SELECT period_id, journal_id FROM account_journal_period WHERE id IN %s', (tuple(new_ids),))
            res = self.cr.fetchall()
            self.period_ids, self.journal_ids = zip(*res)
        return super(journal_print, self).set_context(objects, data, ids, report_type=report_type)

    def set_last_move_id(self, move_id):
        """Remember the last rendered move id (called from the template)."""
        self.last_move_id = move_id

    def check_last_move_id(self, move_id):
        '''
        return True if we need to draw a gray line above this line, used to separate moves
        '''
        if self.last_move_id:
            return not(self.last_move_id == move_id)
        return False

    def tax_codes(self, period_id, journal_id):
        """Return the browse records of every tax code used by non-draft
        move lines of the given journal/period, ordered by code."""
        ids_journal_period = self.pool.get('account.journal.period').search(self.cr, self.uid,
            [('journal_id', '=', journal_id), ('period_id', '=', period_id)])
        self.cr.execute(
            'select distinct tax_code_id from account_move_line ' \
            'where period_id=%s and journal_id=%s and tax_code_id is not null and state<>\'draft\'',
            (period_id, journal_id)
        )
        ids = map(lambda x: x[0], self.cr.fetchall())
        tax_code_ids = []
        if ids:
            self.cr.execute('select id from account_tax_code where id in %s order by code', (tuple(ids),))
            tax_code_ids = map(lambda x: x[0], self.cr.fetchall())
        tax_codes = self.pool.get('account.tax.code').browse(self.cr, self.uid, tax_code_ids)
        return tax_codes

    def _sum_vat(self, period_id, journal_id, tax_code_id):
        """Sum of tax_amount for one tax code in one journal/period."""
        self.cr.execute('select sum(tax_amount) from account_move_line where ' \
                        'period_id=%s and journal_id=%s and tax_code_id=%s and state<>\'draft\'',
                        (period_id, journal_id, tax_code_id))
        return self.cr.fetchone()[0] or 0.0

    def _sum_debit(self, period_id=False, journal_id=False):
        """Total debit over the given (or currently selected) journals and
        periods, restricted by target_move and the wizard filter."""
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT SUM(debit) FROM account_move_line l, account_move am '
                        'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s ' + self.query_get_clause + ' ',
                        (tuple(move_state), tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0

    def _sum_credit(self, period_id=False, journal_id=False):
        """Total credit; mirror image of _sum_debit."""
        if journal_id and isinstance(journal_id, int):
            journal_id = [journal_id]
        if period_id and isinstance(period_id, int):
            period_id = [period_id]
        if not journal_id:
            journal_id = self.journal_ids
        if not period_id:
            period_id = self.period_ids
        if not (period_id and journal_id):
            return 0.0
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT SUM(l.credit) FROM account_move_line l, account_move am '
                        'WHERE l.move_id=am.id AND am.state IN %s AND l.period_id IN %s AND l.journal_id IN %s '+ self.query_get_clause+'',
                        (tuple(move_state), tuple(period_id), tuple(journal_id)))
        return self.cr.fetchone()[0] or 0.0

    def lines(self, period_id, journal_id=False):
        """Return the move lines to print for one period (and optionally a
        single journal), marking the journal periods as printed."""
        if not journal_id:
            journal_id = self.journal_ids
        else:
            journal_id = [journal_id]
        obj_mline = self.pool.get('account.move.line')
        self.cr.execute('update account_journal_period set state=%s where journal_id IN %s and period_id=%s and state=%s', ('printed', self.journal_ids, period_id, 'draft'))
        move_state = ['draft','posted']
        if self.target_move == 'posted':
            move_state = ['posted']
        self.cr.execute('SELECT l.id FROM account_move_line l, account_move am WHERE l.move_id=am.id AND am.state IN %s AND l.period_id=%s AND l.journal_id IN %s ' + self.query_get_clause + ' ORDER BY '+ self.sort_selection + ', l.move_id',(tuple(move_state), period_id, tuple(journal_id) ))
        ids = map(lambda x: x[0], self.cr.fetchall())
        return obj_mline.browse(self.cr, self.uid, ids)

    def _set_get_account_currency_code(self, account_id):
        """Cache the currency symbol of an account on self.account_currency
        (False when the account has no secondary currency)."""
        # Parameterized query: the previous version interpolated account_id
        # into the SQL string with %, which is injection-prone and breaks
        # on non-integer input.
        self.cr.execute("SELECT c.symbol AS code "
                        "FROM res_currency c, account_account AS ac "
                        "WHERE ac.id = %s AND ac.currency_id = c.id",
                        (account_id,))
        result = self.cr.fetchone()
        if result:
            self.account_currency = result[0]
        else:
            self.account_currency = False

    def _get_fiscalyear(self, data):
        """Fiscal year label; taken from the record itself on direct access."""
        if data['model'] == 'account.journal.period':
            return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).fiscalyear_id.name
        return super(journal_print, self)._get_fiscalyear(data)

    def _get_account(self, data):
        """Company name; taken from the record itself on direct access."""
        if data['model'] == 'account.journal.period':
            return self.pool.get('account.journal.period').browse(self.cr, self.uid, data['id']).company_id.name
        return super(journal_print, self)._get_account(data)

    def _display_currency(self, data):
        """Whether the template should show the amount-currency column."""
        if data['model'] == 'account.journal.period':
            return True
        return data['form']['amount_currency']

    def _get_sortby(self, data):
        """Human-readable label for the selected sort order."""
        if self.sort_selection == 'date':
            return 'Date'
        elif self.sort_selection == 'ref':
            return 'Reference Number'
        return 'Date'
# Register both journal report variants (standard and sale/purchase) with
# the OpenERP reporting engine; they share the same parser class above.
report_sxw.report_sxw('report.account.journal.period.print', 'account.journal.period', 'addons/account/report/account_journal.rml', parser=journal_print, header='external')
report_sxw.report_sxw('report.account.journal.period.print.sale.purchase', 'account.journal.period', 'addons/account/report/account_journal_sale_purchase.rml', parser=journal_print, header='external')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
darksylinc/qt-creator | share/qtcreator/debugger/pdumper.py | 23 | 9000 |
import pdb;
import sys;
import linecache
def qdebug(options = None,
        expanded = None,
        typeformats = None,
        individualformats = None,
        watchers = None):
    """Entry point invoked by Qt Creator while a Python program is stopped
    under pdb.  Builds a GDB/MI-style textual report of the local variables
    (or the module/symbol lists) and writes it to stdout in one flush.

    Python 2 code: relies on xrange, dict.iteritems and old-style string
    handling; the exact output format is the wire protocol Qt Creator
    parses, so nothing here may change the emitted text.
    """
    class QDebug:
        # Accumulates the report into self.buffer; the constructor runs the
        # whole dump, dispatching on `options`.
        def __init__(self,
                options = None,
                expanded = None,
                typeformats = None,
                individualformats = None,
                watchers = None):
            self.options = options
            self.expandedINames = expanded
            self.typeformats = typeformats
            self.individualformats = individualformats
            self.watchers = watchers
            self.buffer = ""
            if self.options == "listmodules":
                self.handleListModules()
            elif self.options == "listsymbols":
                self.handleListSymbols(expanded)
            else:
                self.handleListVars()
        def put(self, value):
            # Append raw protocol text to the output buffer.
            #sys.stdout.write(value)
            self.buffer += value
        def putField(self, name, value):
            self.put('%s="%s",' % (name, value))
        def putItemCount(self, count):
            self.put('value="<%s items>",' % count)
        def putEllipsis(self):
            self.put('{name="<incomplete>",value="",type="",numchild="0"},')
        def cleanType(self, type):
            # Strip the "<type '...'>" / "<class '...'>" wrappers from repr.
            t = str(type)
            if t.startswith("<type '") and t.endswith("'>"):
                t = t[7:-2]
            if t.startswith("<class '") and t.endswith("'>"):
                t = t[8:-2]
            return t
        def putType(self, type, priority = 0):
            self.putField("type", self.cleanType(type))
        def putAddress(self, addr):
            self.put('addr="%s",' % cleanAddress(addr))
        def putNumChild(self, numchild):
            self.put('numchild="%s",' % numchild)
        def putValue(self, value, encoding = None, priority = 0):
            self.putField("value", value)
        def putName(self, name):
            self.put('name="%s",' % name)
        def isExpanded(self, iname):
            # True when Qt Creator has this item expanded in the tree view.
            #self.warn("IS EXPANDED: %s in %s" % (iname, self.expandedINames))
            if iname.startswith("None"):
                # NOTE(review): raising a string is invalid in modern
                # Python 2 (TypeError since 2.6); presumably meant
                # `raise Exception(...)` — confirm before changing.
                raise "Illegal iname '%s'" % iname
            #self.warn(" --> %s" % (iname in self.expandedINames))
            return iname in self.expandedINames
        def isExpandedIName(self, iname):
            return iname in self.expandedINames
        def itemFormat(self, item):
            # Per-address format overrides fall back to per-type formats.
            format = self.formats.get(str(cleanAddress(item.value.address)))
            if format is None:
                format = self.typeformats.get(stripClassTag(str(item.value.type)))
            return format
        def dumpFrame(self, frame):
            # Dump all interesting locals of one stack frame.
            for var in frame.f_locals.keys():
                if var == "__file__":
                    continue
                #if var == "__name__":
                #    continue
                if var == "__package__":
                    continue
                if var == "qdebug":
                    continue
                if var != '__builtins__':
                    value = frame.f_locals[var]
                    self.dumpValue(value, var, "local.%s" % var)
        def dumpValue(self, value, name, iname):
            # Emit one {...} item for `value`, recursing into containers
            # only when the item is expanded in the UI.
            t = type(value)
            tt = self.cleanType(t)
            if tt == "module" or tt == "function":
                return
            if str(value).startswith("<class '"):
                return
            # FIXME: Should we?
            if str(value).startswith("<enum-item "):
                return
            self.put("{")
            self.putField("iname", iname)
            self.putName(name)
            self.putType(tt)
            if tt == "NoneType":
                self.putValue("None")
                self.putNumChild(0)
            elif tt == "list" or tt == "tuple":
                self.putItemCount(len(value))
                #self.putValue(value)
                self.put("children=[")
                for i in xrange(len(value)):
                    self.dumpValue(value[i], str(i), "%s.%d" % (iname, i))
                self.put("]")
            elif tt == "str":
                # Strings are hex-encoded; valueencoded=6 tells the client.
                v = value
                self.putValue(v.encode('hex'))
                self.putField("valueencoded", 6)
                self.putNumChild(0)
            elif tt == "unicode":
                v = value
                self.putValue(v.encode('hex'))
                self.putField("valueencoded", 6)
                self.putNumChild(0)
            elif tt == "buffer":
                v = str(value)
                self.putValue(v.encode('hex'))
                self.putField("valueencoded", 6)
                self.putNumChild(0)
            elif tt == "xrange":
                # Reconstruct (start, end) from the first element + length.
                b = iter(value).next()
                e = b + len(value)
                self.putValue("(%d, %d)" % (b, e))
                self.putNumChild(0)
            elif tt == "dict":
                self.putItemCount(len(value))
                self.putField("childnumchild", 2)
                self.put("children=[")
                i = 0
                for (k, v) in value.iteritems():
                    self.put("{")
                    self.putType(" ")
                    self.putValue("%s: %s" % (k, v))
                    if self.isExpanded(iname):
                        self.put("children=[")
                        self.dumpValue(k, "key", "%s.%d.k" % (iname, i))
                        self.dumpValue(v, "value", "%s.%d.v" % (iname, i))
                        self.put("]")
                    self.put("},")
                    i += 1
                self.put("]")
            elif tt == "class":
                pass
            elif tt == "module":
                pass
            elif tt == "function":
                pass
            elif str(value).startswith("<enum-item "):
                # FIXME: Having enums always shown like this is not nice.
                self.putValue(str(value)[11:-1])
                self.putNumChild(0)
            else:
                # Generic object: abbreviate "<Foo object at 0x...>" to
                # "@0x..." and dump non-callable attributes when expanded.
                v = str(value)
                p = v.find(" object at ")
                if p > 1:
                    v = "@" + v[p + 11:-1]
                self.putValue(v)
                if self.isExpanded(iname):
                    self.put("children=[")
                    for child in dir(value):
                        if child == "__dict__":
                            continue
                        if child == "__doc__":
                            continue
                        if child == "__module__":
                            continue
                        attr = getattr(value, child)
                        if callable(attr):
                            continue
                        try:
                            self.dumpValue(attr, child, "%s.%s" % (iname, child))
                        except:
                            pass
                    self.put("],")
            self.put("},")
        def warn(self, msg):
            self.putField("warning", msg)
        def handleListVars(self):
            # Trigger error to get a backtrace.
            frame = None
            #self.warn("frame: %s" % frame)
            try:
                raise ZeroDivisionError
            except ZeroDivisionError:
                frame = sys.exc_info()[2].tb_frame.f_back
            limit = 30
            n = 0
            isActive = False
            # Walk up the stack; only frames between pdb's trace_dispatch
            # and the user's "<module>" frame are dumped.
            while frame is not None and n < limit:
                #self.warn("frame: %s" % frame.f_locals.keys())
                lineno = frame.f_lineno
                code = frame.f_code
                filename = code.co_filename
                name = code.co_name
                if isActive:
                    linecache.checkcache(filename)
                    line = linecache.getline(filename, lineno, frame.f_globals)
                    self.dumpFrame(frame)
                    if name == "<module>":
                        isActive = False
                if name == "trace_dispatch":
                    isActive = True
                frame = frame.f_back
                n = n + 1
            #sys.stdout.flush()
        def handleListModules(self):
            self.put("modules=[");
            for name in sys.modules:
                self.put("{")
                self.putName(name)
                self.putValue(sys.modules[name])
                self.put("},")
            self.put("]")
            #sys.stdout.flush()
        def handleListSymbols(self, module):
            # NOTE(review): this iterates sys.modules and ignores `module`
            # — it looks like it should list dir(sys.modules[module])
            # instead (see the commented-out line); confirm intent.
            #self.put("symbols=%s" % dir(sys.modules[module]))
            self.put("symbols=[");
            for name in sys.modules:
                self.put("{")
                self.putName(name)
                #self.putValue(sys.modules[name])
                self.put("},")
            self.put("]")
            #sys.stdout.flush()
    # Run the dump and emit the whole report in a single write + flush so
    # the client sees one complete response.
    d = QDebug(options, expanded, typeformats, individualformats, watchers)
    #print d.buffer
    sys.stdout.write(d.buffer)
    sys.stdout.flush()
| lgpl-2.1 |
Eseoghene/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/tlslite/mathtls.py | 273 | 11647 | """Miscellaneous helper functions."""
from utils.compat import *
from utils.cryptomath import *
import hmac
import md5
import sha
# Well-known safe-prime (generator, prime) pairs for SRP / Diffie-Hellman:
# 1024, 1536, 2048, 3072, 4096, 6144, and 8192 bit groups.
# The last (8192-bit, RFC 3526) literal was previously broken across two
# source lines — a syntax error — and has been rejoined here.
goodGroupParameters = [
    (2, 0xEEAF0AB9ADB38DD69C33F80AFA8FC5E86072618775FF3C0B9EA2314C9C256576D674DF7496EA81D3383B4813D692C6E0E0D5D8E250B98BE48E495C1D6089DAD15DC7D7B46154D6B6CE8EF4AD69B15D4982559B297BCF1885C529F566660E57EC68EDBC3C05726CC02FD4CBF4976EAA9AFD5138FE8376435B9FC61D2FC0EB06E3),
    (2, 0x9DEF3CAFB939277AB1F12A8617A47BBBDBA51DF499AC4C80BEEEA9614B19CC4D5F4F5F556E27CBDE51C6A94BE4607A291558903BA0D0F84380B655BB9A22E8DCDF028A7CEC67F0D08134B1C8B97989149B609E0BE3BAB63D47548381DBC5B1FC764E3F4B53DD9DA1158BFD3E2B9C8CF56EDF019539349627DB2FD53D24B7C48665772E437D6C7F8CE442734AF7CCB7AE837C264AE3A9BEB87F8A2FE9B8B5292E5A021FFF5E91479E8CE7A28C2442C6F315180F93499A234DCF76E3FED135F9BB),
    (2, 0xAC6BDB41324A9A9BF166DE5E1389582FAF72B6651987EE07FC3192943DB56050A37329CBB4A099ED8193E0757767A13DD52312AB4B03310DCD7F48A9DA04FD50E8083969EDB767B0CF6095179A163AB3661A05FBD5FAAAE82918A9962F0B93B855F97993EC975EEAA80D740ADBF4FF747359D041D5C33EA71D281E446B14773BCA97B43A23FB801676BD207A436C6481F1D2B9078717461A5B9D32E688F87748544523B524B0D57D5EA77A2775D2ECFA032CFBDBF52FB3786160279004E57AE6AF874E7303CE53299CCC041C7BC308D82A5698F3A8D0C38271AE35F8E9DBFBB694B5C803D89F7AE435DE236D525F54759B65E372FCD68EF20FA7111F9E4AFF73),
    (2, 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF),
    (5, 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199FFFFFFFFFFFFFFFF),
    (5, 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),
    (5, 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C93402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6D5727CC9CBE704F4F337F5EE5ECE449CDE51A6772D9E0D58FBD38D90FEC216CC66117D62A9F1E41D20D064C2F2424B0A20BA581B36FFD5C36FAF87681291BFA9135D6C03402849236C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AACC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E6DCC4024FFFFFFFFFFFFFFFF),
]
def P_hash(hashModule, secret, seed, length):
    """TLS P_hash data-expansion function (RFC 2246, section 5).

    Iterates HMAC(secret, A(i) + seed) with A(0) = seed until `length`
    output bytes are produced.  `hashModule` is a PEP 247 hash module
    (md5 or sha).  Returns a byte array (project createByteArrayZeros
    type).  The previous version had an unreachable trailing `return`
    after a `while 1` loop and shadowed the builtin `bytes`.
    """
    out = createByteArrayZeros(length)
    secret = bytesToString(secret)
    seed = bytesToString(seed)
    A = seed
    index = 0
    while index < length:
        # A(i) = HMAC(secret, A(i-1))
        A = hmac.HMAC(secret, A, hashModule).digest()
        output = hmac.HMAC(secret, A + seed, hashModule).digest()
        for c in output:
            if index >= length:
                break
            out[index] = ord(c)
            index += 1
    return out
def PRF(secret, label, seed, length):
    """TLS 1.0/1.1 pseudo-random function: P_MD5(S1) XOR P_SHA1(S2).

    The secret is split into two (possibly overlapping) halves; each half
    drives one P_hash stream and the streams are XORed together.
    """
    n = len(secret)
    S1 = secret[:int(math.ceil(n / 2.0))]
    S2 = secret[int(math.floor(n / 2.0)):]
    label_seed = concatArrays(stringToBytes(label), seed)
    md5_stream = P_hash(md5, S1, label_seed, length)
    sha_stream = P_hash(sha, S2, label_seed, length)
    for i in range(length):
        md5_stream[i] ^= sha_stream[i]
    return md5_stream
def PRF_SSL(secret, seed, length):
    """SSLv3 key-derivation PRF.

    Round i (0-based) hashes MD5(secret + SHA1(label + secret + seed))
    where label is the letter chr('A'+i) repeated i+1 times ('A', 'BB',
    'CCC', ...); at most 26 rounds of output are available.
    """
    secretStr = bytesToString(secret)
    seedStr = bytesToString(seed)
    out = createByteArrayZeros(length)
    pos = 0
    for round_no in range(26):
        label = chr(ord('A') + round_no) * (round_no + 1)
        digest = md5.md5(secretStr +
                         sha.sha(label + secretStr + seedStr).digest()).digest()
        for ch in digest:
            if pos >= length:
                return out
            out[pos] = ord(ch)
            pos += 1
    return out
def makeX(salt, username, password):
    """SRP private key: x = SHA1(salt | SHA1(username ':' password)),
    returned as a number.  Rejects oversized usernames/salts."""
    if len(username) >= 256:
        raise ValueError("username too long")
    if len(salt) >= 256:
        raise ValueError("salt too long")
    innerDigest = sha.sha(username + ":" + password).digest()
    return stringToNumber(sha.sha(salt + innerDigest).digest())
#This function is used by VerifierDB.makeVerifier
def makeVerifier(username, password, bits):
    """Create an SRP verifier for the given group size.

    Returns (N, g, salt, verifier) where verifier = g^x mod N with a
    fresh 16-byte random salt.  `bits` must be one of the standard group
    sizes (KeyError otherwise).
    """
    groupIndex = {1024: 0, 1536: 1, 2048: 2, 3072: 3,
                  4096: 4, 6144: 5, 8192: 6}[bits]
    g, N = goodGroupParameters[groupIndex]
    salt = bytesToString(getRandomBytes(16))
    x = makeX(salt, username, password)
    return N, g, salt, powMod(g, x, N)
def PAD(n, x):
    """Left-pad the byte string of x with NULs to the byte length of n."""
    width = len(numberToString(n))
    s = numberToString(x)
    padding = "\0" * (width - len(s)) if len(s) < width else ""
    return padding + s
def makeU(N, A, B):
    """SRP-6 scrambling parameter: u = SHA1(PAD(A) | PAD(B))."""
    digest = sha.sha(PAD(N, A) + PAD(N, B)).digest()
    return stringToNumber(digest)
def makeK(N, g):
    """SRP-6a multiplier: k = SHA1(N | PAD(g))."""
    digest = sha.sha(numberToString(N) + PAD(N, g)).digest()
    return stringToNumber(digest)
"""
MAC_SSL
Modified from Python HMAC by Trevor
"""
class MAC_SSL:
    """MAC_SSL class.

    Keyed hash used by the SSLv3 record layer (an HMAC predecessor with
    fixed 0x36 / 0x5C pads instead of XORed keys).
    This supports the API for Cryptographic Hash Functions (PEP 247).
    Modified from Python HMAC by Trevor.
    """
    def __init__(self, key, msg = None, digestmod = None):
        """Create a new MAC_SSL object.

        key: key for the keyed hash object.
        msg: Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. Defaults to the md5 module.
        """
        if digestmod is None:
            import md5
            digestmod = md5
        if key is None:  # fast-copy path used by copy(): skip all keying work
            # (was `key == None`; identity comparison is the correct idiom)
            return
        self.digestmod = digestmod
        self.outer = digestmod.new()
        self.inner = digestmod.new()
        self.digest_size = digestmod.digest_size
        # NOTE(review): SSLv3 specifies 48 pad bytes for MD5 and 40 for
        # SHA-1; 40 is hard-coded here — confirm this is only ever used
        # with SHA-1 (or that the shorter pad is intentional).
        ipad = "\x36" * 40
        opad = "\x5C" * 40
        self.inner.update(key)
        self.inner.update(ipad)
        self.outer.update(key)
        self.outer.update(opad)
        if msg is not None:
            self.update(msg)

    def update(self, msg):
        """Update this hashing object with the string msg.
        """
        self.inner.update(msg)

    def copy(self):
        """Return a separate copy of this hashing object.
        An update to this copy won't affect the original object.
        """
        # Passing None skips __init__'s keying work; the hash state is
        # copied directly from the inner/outer digests instead.
        other = MAC_SSL(None)
        other.digest_size = self.digest_size
        other.digestmod = self.digestmod
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other

    def digest(self):
        """Return the hash value of this hashing object.
        This returns a string containing 8-bit data. The object is
        not altered in any way by this function; you can continue
        updating the object after calling this function.
        """
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h.digest()

    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        return "".join([hex(ord(x))[2:].zfill(2)
                        for x in tuple(self.digest())])
| apache-2.0 |
DmitriyFromBSUIR/Home_Automation_Server | WebServer/Preforked_Hybrid_MultiServer.py | 1 | 7714 | from multiprocessing import Pool, Manager, Process, Lock, Queue, Value, Array, Pipe, TimeoutError
import multiprocessing as mp
import concurrent.futures
import time
import os
from timeit import default_timer as timer
import struct
import itertools as it
import math
import random as rnd
import time
def sendMsgsToMultiServer(webservice_pipe, handlerArgs):
    """Simulated web-service worker.

    Sleeps a random 1-20 seconds (to mimic real work), then pushes three
    values through the pipe — this process's pid, the handler id
    (handlerArgs[0]) and the sequence id (handlerArgs[1]) — closes the
    pipe and returns the number of seconds slept.
    """
    workerPid = os.getpid()
    delay = rnd.randint(1, 20)
    time.sleep(delay)
    handlerID, seqID = handlerArgs[0], handlerArgs[1]
    for payload in (workerPid, handlerID, seqID):
        webservice_pipe.send(payload)
    print("from Web-service: curProcPID = ", workerPid)
    webservice_pipe.close()
    return delay
def recvWebServiceMsg(multiServer_pipe):
    """Read the (pid, handler id, seq id) triple a web-service worker
    sent through the pipe; log it and return the first value (the pid)."""
    webserviceMsg, handlerID, seqID = (multiServer_pipe.recv() for _ in range(3))
    print("webserviceMsg: ", webserviceMsg, " handlerID: ", handlerID, " seqID: ", seqID)
    return webserviceMsg
def pid_handler(webservice_pipe, argsList):
    # Pool task entry point: delegates to the simulated web-service worker.
    # Deliberately returns None (the worker's return value is discarded),
    # which is what future.result() shows in getCallableResultsFromFS.
    sendMsgsToMultiServer(webservice_pipe, argsList)
Pipes = list()
class PreforkedHybridMultiServer:
    """Coordinator that pre-forks a pool of worker processes, dispatches
    one handler per simulated web-service over it, and collects results
    plus pipe messages.  Demo/prototype code: see the NOTE(review)
    comments for known design issues."""
    def __init__(self, handlersList, handlersArgsList, poolTasksTimeout=None, poolShoutdownTimeout=300, AMDAHLS_COEFFICIENT=2):
        #        dispathingSupervisor = mp.Manager()
        #        generalLock = mp.Lock()
        # One "web-service" per handler; pool sized as a multiple of CPUs.
        self.WEBSERVICES_MAX_COUNT = len(handlersList)
        self.POOL_SIZE = AMDAHLS_COEFFICIENT * mp.cpu_count()
        self._ppeWorkers = concurrent.futures.ProcessPoolExecutor(max_workers=self.POOL_SIZE)
        self._ppeRecvPipeWorkers = concurrent.futures.ProcessPoolExecutor(max_workers=self.WEBSERVICES_MAX_COUNT)
        self._futuresTasks = list()
        self._poolTasksTimeout = poolTasksTimeout
        self._poolShoutdownTimeout = poolShoutdownTimeout
        # endpoints for IPC (Coordinator and web-services)
        self._pipes = []
        self.ipcEndpointsCreate()
        # the list of handlers for scheduler
        self._handlersList = handlersList
        # the list of handlers args
        self._handlersArgsList = handlersArgsList
        # working Proc PID and task
        self._generalList = list()
    def ipcEndpointsCreate(self):
        """Create one duplex Pipe per web-service and mirror the pairs into
        the module-global `Pipes` list (see its NOTE about fork vs spawn)."""
        #self._pipes.clear()
        for i in range(0, self.WEBSERVICES_MAX_COUNT):
            parent_conn, child_conn = Pipe()
            self._pipes.append( (parent_conn, child_conn) )
            Pipes.append( (parent_conn, child_conn) )
            # make pipes global visible
            #Pipes = list(self._pipes)
            #Pipes.append(self._pipes[i])
        print("Glogal Visible Pipes:")
        print(Pipes)
    def processPoolExecutorMonitoringStatus(self):
        '''
        return_when=
        FIRST_COMPLETED The function will return when any future finishes or is cancelled.
        FIRST_EXCEPTION The function will return when any future finishes by raising an exception. If no future raises an exception then it is equivalent to ALL_COMPLETED.
        ALL_COMPLETED The function will return when all futures finish or are cancelled.
        '''
        # Blocks up to poolShoutdownTimeout waiting for every submitted task.
        fsTasksAreDone, fsTasksAreNotDone = concurrent.futures.wait(self._futuresTasks,
                                                                    timeout=self._poolShoutdownTimeout,
                                                                    return_when=concurrent.futures.ALL_COMPLETED)
        print("LOG: Tasks that have been finished successfully:")
        print(fsTasksAreDone)
        print("LOG: Tasks that have not been finished:")
        print(fsTasksAreNotDone)
    def dispatheringScheduler(self, executors):
        """Submit one handler per web-service, passing it the child end of
        its pipe.  Appends the service index to each handler's arg list."""
        # futuresTasks = [executors.map(f, (num, generalLock)) for num in range(3)]
        for i in range(0, self.WEBSERVICES_MAX_COUNT):
            self._handlersArgsList[i].append(i)
            self._futuresTasks.append(
                executors.submit(self._handlersList[i], self._pipes[i][1], self._handlersArgsList[i]))
            # NOTE(review): os.getpid() here is the coordinator's pid, not
            # the worker's — presumably the worker pid was intended.
            curProcPID = os.getpid()
            self._generalList.append(([curProcPID, i], self._futuresTasks[i]))
    def getCallableResultsFromFS(self):
        """Print each task's result as it completes, logging (rather than
        propagating) per-task exceptions, cancellations and timeouts."""
        # for futureTask in concurrent.futures.as_completed(futuresTasks):
        for finishedTask in concurrent.futures.as_completed(self._futuresTasks, timeout=self._poolTasksTimeout):
            # finishedTask = futuresTasks[futureTask]
            try:
                # print('working proc in procPoolExecutors : %d, PID: %d and result: %d' % (0, 0, finishedTask.result()))
                print(finishedTask.result())
            except Exception as exc:
                print('LOG: web-service future struct: %r generated an exception: %s' % (finishedTask, exc))
            # except concurrent.futures.BrokenProcessPool as brkProcErr:
            except concurrent.futures.CancelledError as cnclErr:
                print(
                    "LOG: Error! One or more of the workers of a ProcessPoolExecutor has cancelled or terminated in a non-clean fashion (for example, if it was killed from the outside)")
                print(cnclErr)
            except TimeoutError as tmoutErr:
                print("LOG: We lacked patience and got a multiprocessing.TimeoutError")
                print(tmoutErr)
            finally:
                print("Success iteration")
    def poolCorrectShutdown(self, executors):
        """Wait for outstanding tasks, then shut the executor down."""
        self.processPoolExecutorMonitoringStatus()
        executors.shutdown(wait=True)
    def run(self):
        """Dispatch all handlers, collect their results and shut the worker
        pool down, printing total wall-clock time."""
        startTime = timer()
        # NOTE(review): the `with` block shuts _ppeWorkers down on exit, so
        # run() cannot be called twice on the same instance.
        with self._ppeWorkers as executors:
            # task scheduling
            self.dispatheringScheduler(executors)
            # get results from callable type executors (futures struct)
            self.getCallableResultsFromFS()
            # pool turn off
            self.poolCorrectShutdown(executors)
        endTime = timer() - startTime
        print("LOG: processing time: ", endTime)
    def recvWebServicesMsgs(self):
        """Attempt to drain the messages the workers pushed through the
        pipes.  NOTE(review): executors.map() is given a Connection (or a
        pipe *pair*) as its iterable instead of a sequence of arguments,
        and the resulting lazy map objects are printed but never iterated
        — so recvWebServiceMsg is likely never actually called.  Confirm
        intended behavior before relying on this method."""
        futuresTasks = list()
        with self._ppeRecvPipeWorkers as executors:
            for i in range(0, self.WEBSERVICES_MAX_COUNT):
                #futuresTasks.append(executors.map(recvWebServiceMsg, self._pipes[i][0]))
                #print("Global Pipe: ", Pipes[i][0])
                #futuresTasks.append(executors.map(recvWebServiceMsg, Pipes[i][0]))
                futuresTasks.append(executors.map(recvWebServiceMsg, Pipes[i]))
            #for i, result in zip(self._pipes, futuresTasks):
            for i, result in zip(Pipes, futuresTasks):
                # futuresTasks.append(executors.map(f, num[i], generalLock))
                #print("communication pipes pair :", i, "; msg: [", result[0], ", ", result[1], ", ", result[2], "]")
                print("communication pipes pair :", i, "; msg: [", result, "]")
if __name__ == '__main__':
    #create MultiServer
    # Three simulated web-services, all using the same handler.
    handlers = [ pid_handler, pid_handler, pid_handler ]
    #handlers = list()
    #handlers.append(sendMsgsToMultiServer)
    # Seed each handler's argument list with its own index; the scheduler
    # appends a second index at dispatch time.
    handlers_args = list()
    for i in range(0, len(handlers)):
        handler_args = list()
        handler_args.append(i)
        handlers_args.append(handler_args)
    phMultiServer = PreforkedHybridMultiServer(handlers, handlers_args)
    phMultiServer.run()
    # test for IPC
phMultiServer.recvWebServicesMsgs() | apache-2.0 |
Awesomeomics/webserver | env/lib/python2.7/site-packages/pip/_vendor/colorama/ansitowin32.py | 208 | 6664 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
import re
import sys
from .ansi import AnsiFore, AnsiBack, AnsiStyle, Style
from .winterm import WinTerm, WinColor, WinStyle
from .win32 import windll
# A single shared WinTerm instance, available only when the win32 bindings
# could be loaded (windll is None elsewhere).
winterm = WinTerm() if windll is not None else None
def is_a_tty(stream):
    """Return a truthy value iff *stream* reports itself as a terminal."""
    isatty = getattr(stream, 'isatty', None)
    return isatty is not None and isatty()
class StreamWrapper(object):
    '''
    Transparent proxy around a stream (e.g. stdout): every attribute is
    forwarded to the wrapped stream, except 'write()', which is handed to
    the supplied converter instead.
    '''
    def __init__(self, wrapped, converter):
        # Name-mangled attributes so we cannot collide with attribute names
        # of the stream we proxy (those are resolved via __getattr__).
        self.__stream = wrapped
        self.__converter = converter

    def __getattr__(self, name):
        # Only called when normal lookup fails, i.e. for everything we did
        # not define ourselves -- delegate to the wrapped stream.
        return getattr(self.__stream, name)

    def write(self, text):
        # The converter decides what actually reaches the real stream.
        self.__converter.write(text)
class AnsiToWin32(object):
    '''
    Implements a 'write()' method which, on Windows, will strip ANSI character
    sequences from the text, and if outputting to a tty, will convert them into
    win32 function calls.
    '''
    # Matches one CSI escape sequence: ESC '[' <digits/semicolons> <letter>.
    # The backslashes before '[' and 'd' are doubled: '\[' and '\d' are not
    # valid *string* escapes (DeprecationWarning on modern Pythons); the
    # compiled regex pattern is byte-for-byte the same as before.
    ANSI_RE = re.compile('\033\\[((?:\\d|;)*)([a-zA-Z])')

    def __init__(self, wrapped, convert=None, strip=None, autoreset=False):
        '''
        Arguments:
            wrapped:   the stream to proxy (normally sys.stdout or sys.stderr).
            convert:   convert ANSI sequences into win32 calls; defaults to
                       True only on Windows when wrapping a real tty.
            strip:     strip ANSI sequences from the output; defaults to True
                       on Windows.
            autoreset: emit a "reset all styles" after every write().
        '''
        # The wrapped stream (normally sys.stdout or sys.stderr)
        self.wrapped = wrapped

        # should we reset colors to defaults after every .write()
        self.autoreset = autoreset

        # create the proxy wrapping our output stream; its write() calls
        # back into this converter.
        self.stream = StreamWrapper(wrapped, self)

        on_windows = sys.platform.startswith('win')

        # should we strip ANSI sequences from our output?
        if strip is None:
            strip = on_windows
        self.strip = strip

        # should we should convert ANSI sequences into win32 calls?
        if convert is None:
            convert = on_windows and is_a_tty(wrapped)
        self.convert = convert

        # dict of ansi codes to win32 functions and parameters
        self.win32_calls = self.get_win32_calls()

        # are we wrapping stderr?
        self.on_stderr = self.wrapped is sys.stderr

    def should_wrap(self):
        '''
        True if this class is actually needed. If false, then the output
        stream will not be affected, nor will win32 calls be issued, so
        wrapping stdout is not actually required. This will generally be
        False on non-Windows platforms, unless optional functionality like
        autoreset has been requested using kwargs to init()
        '''
        return self.convert or self.strip or self.autoreset

    def get_win32_calls(self):
        '''
        Build the ANSI-code -> (winterm function, *args) dispatch table.
        Returns None when conversion is disabled or win32 is unavailable.
        '''
        if self.convert and winterm:
            return {
                AnsiStyle.RESET_ALL: (winterm.reset_all, ),
                AnsiStyle.BRIGHT: (winterm.style, WinStyle.BRIGHT),
                AnsiStyle.DIM: (winterm.style, WinStyle.NORMAL),
                AnsiStyle.NORMAL: (winterm.style, WinStyle.NORMAL),
                AnsiFore.BLACK: (winterm.fore, WinColor.BLACK),
                AnsiFore.RED: (winterm.fore, WinColor.RED),
                AnsiFore.GREEN: (winterm.fore, WinColor.GREEN),
                AnsiFore.YELLOW: (winterm.fore, WinColor.YELLOW),
                AnsiFore.BLUE: (winterm.fore, WinColor.BLUE),
                AnsiFore.MAGENTA: (winterm.fore, WinColor.MAGENTA),
                AnsiFore.CYAN: (winterm.fore, WinColor.CYAN),
                AnsiFore.WHITE: (winterm.fore, WinColor.GREY),
                AnsiFore.RESET: (winterm.fore, ),
                AnsiBack.BLACK: (winterm.back, WinColor.BLACK),
                AnsiBack.RED: (winterm.back, WinColor.RED),
                AnsiBack.GREEN: (winterm.back, WinColor.GREEN),
                AnsiBack.YELLOW: (winterm.back, WinColor.YELLOW),
                AnsiBack.BLUE: (winterm.back, WinColor.BLUE),
                AnsiBack.MAGENTA: (winterm.back, WinColor.MAGENTA),
                AnsiBack.CYAN: (winterm.back, WinColor.CYAN),
                AnsiBack.WHITE: (winterm.back, WinColor.GREY),
                AnsiBack.RESET: (winterm.back, ),
            }

    def write(self, text):
        '''
        Write *text* to the wrapped stream, stripping/converting ANSI
        sequences as configured, optionally resetting styles afterwards.
        '''
        if self.strip or self.convert:
            self.write_and_convert(text)
        else:
            self.wrapped.write(text)
            self.wrapped.flush()
        if self.autoreset:
            self.reset_all()

    def reset_all(self):
        '''
        Reset colors/styles: via win32 calls when converting, otherwise by
        emitting the ANSI reset sequence to a still-open tty.
        '''
        if self.convert:
            self.call_win32('m', (0,))
        elif not self.wrapped.closed and is_a_tty(self.wrapped):
            self.wrapped.write(Style.RESET_ALL)

    def write_and_convert(self, text):
        '''
        Write the given text to our wrapped stream, stripping any ANSI
        sequences from the text, and optionally converting them into win32
        calls.
        '''
        cursor = 0
        for match in self.ANSI_RE.finditer(text):
            start, end = match.span()
            # Emit the plain text preceding this escape sequence ...
            self.write_plain_text(text, cursor, start)
            # ... then act on the sequence itself (never emitted as text).
            self.convert_ansi(*match.groups())
            cursor = end
        self.write_plain_text(text, cursor, len(text))

    def write_plain_text(self, text, start, end):
        '''Write text[start:end] (if non-empty) to the wrapped stream.'''
        if start < end:
            self.wrapped.write(text[start:end])
            self.wrapped.flush()

    def convert_ansi(self, paramstring, command):
        '''Translate one parsed ANSI sequence into a win32 call (if enabled).'''
        if self.convert:
            params = self.extract_params(paramstring)
            self.call_win32(command, params)

    def extract_params(self, paramstring):
        '''Parse "1;22;3" into the tuple of ints (1, 22, 3); '' -> ().'''
        def split(paramstring):
            for p in paramstring.split(';'):
                if p != '':
                    yield int(p)
        return tuple(split(paramstring))

    def call_win32(self, command, params):
        '''
        Dispatch one ANSI command letter plus its params to winterm.

        NOTE: params arriving from convert_ansi() is always a tuple, so the
        list comparison below only matters for external callers passing a
        list; it is kept for backward compatibility.
        '''
        if params == []:
            params = [0]
        if command == 'm':          # SGR: set graphics rendition
            for param in params:
                if param in self.win32_calls:
                    func_args = self.win32_calls[param]
                    func = func_args[0]
                    args = func_args[1:]
                    kwargs = dict(on_stderr=self.on_stderr)
                    func(*args, **kwargs)
        elif command in ('H', 'f'):  # set cursor position
            func = winterm.set_cursor_position
            func(params, on_stderr=self.on_stderr)
        elif command == 'J':        # erase display (was `in ('J')`: string membership)
            func = winterm.erase_data
            func(params, on_stderr=self.on_stderr)
        elif command == 'A':        # cursor up; default to one row
            if params == () or params is None:
                num_rows = 1
            else:
                num_rows = params[0]
            func = winterm.cursor_up
            func(num_rows, on_stderr=self.on_stderr)
| mit |
pepeportela/edx-platform | openedx/core/djangoapps/waffle_utils/__init__.py | 1 | 12115 | """
Utilities for waffle.
Includes namespacing, caching, and course overrides for waffle flags.
Usage:
For Waffle Flags, first set up the namespace, and then create flags using the
namespace. For example:
WAFFLE_FLAG_NAMESPACE = WaffleFlagNamespace(name='course_experience')
# Use CourseWaffleFlag when you are in the context of a course.
UNIFIED_COURSE_TAB_FLAG = CourseWaffleFlag(WAFFLE_FLAG_NAMESPACE, 'unified_course_tab')
# Use WaffleFlag when outside the context of a course.
HIDE_SEARCH_FLAG = WaffleFlag(WAFFLE_FLAG_NAMESPACE, 'hide_search')
You can check these flags in code using the following:
HIDE_SEARCH_FLAG.is_enabled()
UNIFIED_COURSE_TAB_FLAG.is_enabled(course_key)
To test these WaffleFlags, see testutils.py.
In the above examples, you will use Django Admin "waffle" section to configure
for a flag named: course_experience.unified_course_tab
You could also use the Django Admin "waffle_utils" section to configure a course
override for this same flag (e.g. course_experience.unified_course_tab).
For Waffle Switches, first set up the namespace, and then create the flag name.
For example:
WAFFLE_SWITCHES = WaffleSwitchNamespace(name=WAFFLE_NAMESPACE)
ESTIMATE_FIRST_ATTEMPTED = 'estimate_first_attempted'
You can then use the switch as follows:
WAFFLE_SWITCHES.is_enabled(waffle.ESTIMATE_FIRST_ATTEMPTED)
To test WaffleSwitchNamespace, use the provided context managers. For example:
with WAFFLE_SWITCHES.override(waffle.ESTIMATE_FIRST_ATTEMPTED, active=True):
...
"""
import logging
from abc import ABCMeta
from contextlib import contextmanager
from opaque_keys.edx.keys import CourseKey
from request_cache import get_cache as get_request_cache, get_request
from waffle import flag_is_active, switch_is_active
from waffle.models import Flag
from waffle.testutils import override_switch as waffle_override_switch
from .models import WaffleFlagCourseOverrideModel
log = logging.getLogger(__name__)
class WaffleNamespace(object):
    """
    Abstract base for a request-cached waffle flag/switch namespace.

    One instance stands for a single namespace string (e.g.
    "course_experience") under which a whole family of flags or switches
    live and share a prefix.
    """
    __metaclass__ = ABCMeta

    def __init__(self, name, log_prefix=None):
        """
        Arguments:
            name (String): prefix prepended to every flag/switch name in
                this namespace (e.g. "grades").
            log_prefix (String): optional prefix for log messages
                (e.g. "Grades: "); empty string when omitted.
        """
        assert name, "The name is required."
        self.name = name
        self.log_prefix = log_prefix or ''

    def _namespaced_name(self, setting_name):
        """
        Return ``<namespace>.<setting_name>``, e.g.
        ``my_namespace.my_setting_name``.
        """
        return u'{0}.{1}'.format(self.name, setting_name)

    @staticmethod
    def _get_request_cache():
        """
        Return the request cache shared by every instance of this class.
        """
        return get_request_cache('WaffleNamespace')
class WaffleSwitchNamespace(WaffleNamespace):
    """
    Groups a set of waffle switches under a single namespace.

    Switch values for every namespace live together in one request-cache
    entry.
    """

    def is_enabled(self, switch_name):
        """
        Return whether the namespaced switch is active, caching the answer
        for the rest of the request.
        """
        key = self._namespaced_name(switch_name)
        cached = self._cached_switches.get(key)
        if cached is None:
            cached = switch_is_active(key)
            self._cached_switches[key] = cached
        return cached

    @contextmanager
    def override(self, switch_name, active=True):
        """
        Force the switch's active value for the duration of this context.

        Note: the value is overridden both in the request cache AND in the
        model; the request-cache value is restored on exit.
        """
        original_value = self.is_enabled(switch_name)
        try:
            self.override_for_request(switch_name, active)
            with self.override_in_model(switch_name, active):
                yield
        finally:
            self.override_for_request(switch_name, original_value)

    def override_for_request(self, switch_name, active=True):
        """
        Force the switch's value for the remainder of this request only
        (request cache, not the model) -- not a context manager.
        """
        key = self._namespaced_name(switch_name)
        self._cached_switches[key] = active
        log.info(u"%sSwitch '%s' set to %s for request.", self.log_prefix, key, active)

    @contextmanager
    def override_in_model(self, switch_name, active=True):
        """
        Force the switch's value in the model (not the request cache) for
        the duration of this context.
        """
        key = self._namespaced_name(switch_name)
        with waffle_override_switch(key, active):
            log.info(u"%sSwitch '%s' set to %s in model.", self.log_prefix, key, active)
            yield

    @property
    def _cached_switches(self):
        """
        The request-cache dict holding all namespaced switch values.
        """
        return self._get_request_cache().setdefault('switches', {})
class WaffleFlagNamespace(WaffleNamespace):
    """
    Provides a single namespace for a set of waffle flags.

    All namespaced flag values are stored in a single request cache
    containing all flags for all namespaces.
    """
    __metaclass__ = ABCMeta

    @property
    def _cached_flags(self):
        """
        Returns a dictionary of all namespaced flags in the request cache.
        """
        return self._get_request_cache().setdefault('flags', {})

    def is_flag_active(self, flag_name, check_before_waffle_callback=None, flag_undefined_default=None):
        """
        Returns and caches whether the provided flag is active.

        If the flag value is already cached in the request, it is returned.
        If check_before_waffle_callback is supplied, it is called before
        checking waffle; when it returns None (or is not supplied), waffle
        is consulted.

        Important: Caching for the check_before_waffle_callback must be
        handled by the callback itself.

        Arguments:
            flag_name (String): The name of the flag to check.
            check_before_waffle_callback (function): (Optional) Called with
                the namespaced flag name before falling through to waffle;
                a True/False return short-circuits, None falls through.
            flag_undefined_default (Boolean): Default returned when the
                waffle flag row does not exist.
        """
        namespaced_flag_name = self._namespaced_name(flag_name)
        # Initialize so the waffle lookup below still runs when no callback
        # is supplied (previously `value` was referenced unassigned here,
        # raising UnboundLocalError for callers without a callback).
        value = None
        if check_before_waffle_callback:
            # Do not use the request cache for the callback's answer: its
            # cache key may differ (e.g. include a course id), so the
            # callback handles its own caching.
            value = check_before_waffle_callback(namespaced_flag_name)
        if value is None:
            value = self._cached_flags.get(namespaced_flag_name)
            if value is None:
                if flag_undefined_default is not None:
                    # Apply the default only when the flag is undefined in waffle.
                    try:
                        Flag.objects.get(name=namespaced_flag_name)
                    except Flag.DoesNotExist:
                        value = flag_undefined_default
                if value is None:
                    value = flag_is_active(get_request(), namespaced_flag_name)
                self._cached_flags[namespaced_flag_name] = value
        return value
class WaffleFlag(object):
    """
    A single waffle flag, resolved through a cached waffle namespace.
    """

    def __init__(self, waffle_namespace, flag_name, flag_undefined_default=None):
        """
        Arguments:
            waffle_namespace (WaffleFlagNamespace): cached namespace this
                flag belongs to.
            flag_name (String): flag name without the namespace prefix.
            flag_undefined_default (Boolean): value to report when the
                waffle flag does not exist.
        """
        self.flag_name = flag_name
        self.flag_undefined_default = flag_undefined_default
        self.waffle_namespace = waffle_namespace

    @property
    def namespaced_flag_name(self):
        """
        The fully namespaced flag name, e.g. ``my_namespace.my_flag``.
        """
        return self.waffle_namespace._namespaced_name(self.flag_name)

    def is_enabled(self):
        """
        Return whether the flag is currently enabled.
        """
        return self.waffle_namespace.is_flag_active(
            self.flag_name, flag_undefined_default=self.flag_undefined_default)
class CourseWaffleFlag(WaffleFlag):
    """
    Represents a single waffle flag that can be forced on/off for a course.

    Uses a cached waffle namespace.
    """

    def _get_course_override_callback(self, course_key):
        """
        Returns a function to use as the check_before_waffle_callback.

        Arguments:
            course_key (CourseKey): The course to check for override before
                checking waffle.
        """
        def course_override_callback(namespaced_flag_name):
            """
            Returns True/False if the flag was forced on or off for the provided
            course. Returns None if the flag was not overridden.

            Note: Has side effect of caching the override value in the
            namespace's request cache.

            Arguments:
                namespaced_flag_name (String): A namespaced version of the flag
                    to check.
            """
            # Cache key combines the flag name with the course id so the
            # per-course override cannot collide with the plain flag value.
            # NOTE(review): ``unicode`` is a Python 2 builtin; under Python 3
            # this would need ``str``/``six.text_type`` -- confirm target
            # interpreter.
            cache_key = u'{}.{}'.format(namespaced_flag_name, unicode(course_key))
            force_override = self.waffle_namespace._cached_flags.get(cache_key)
            if force_override is None:
                force_override = WaffleFlagCourseOverrideModel.override_value(namespaced_flag_name, course_key)
                self.waffle_namespace._cached_flags[cache_key] = force_override
            if force_override == WaffleFlagCourseOverrideModel.ALL_CHOICES.on:
                return True
            if force_override == WaffleFlagCourseOverrideModel.ALL_CHOICES.off:
                return False
            # "unset": fall through to the regular waffle check.
            return None
        return course_override_callback

    def is_enabled(self, course_key=None):
        """
        Returns whether or not the flag is enabled.

        Arguments:
            course_key (CourseKey): The course to check for override before
                checking waffle.

        NOTE(review): despite the ``None`` default, the assert below rejects
        ``course_key=None`` -- callers must always pass a real CourseKey.
        """
        # validate arguments
        assert issubclass(type(course_key), CourseKey), "The course_key '{}' must be a CourseKey.".format(
            str(course_key)
        )
        return self.waffle_namespace.is_flag_active(
            self.flag_name,
            check_before_waffle_callback=self._get_course_override_callback(course_key),
            flag_undefined_default=self.flag_undefined_default
        )
| agpl-3.0 |
sunqm/pyscf | pyscf/ci/addons.py | 1 | 1311 | #!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import numpy as np
from pyscf import scf
def convert_to_gcisd(myci):
    """Convert a CISD object built on an RHF/UHF reference to the GCISD
    (generalized spin-orbital) equivalent, reusing the existing CI vector.

    Returns ``myci`` unchanged if it is already a GCISD instance.
    """
    from pyscf.ci import gcisd
    if isinstance(myci, gcisd.GCISD):
        return myci

    # Convert the underlying mean-field object to GHF first; the GCISD
    # object is then constructed on top of it.
    mf = scf.addons.convert_to_ghf(myci._scf)
    gci = gcisd.GCISD(mf)
    # Conversion only supports default orbital-space sizes.
    assert(myci._nocc is None)
    assert(myci._nmo is None)
    # Copy all settings/results wholesale, then re-point the attributes
    # that must reference the GHF objects instead of the originals.
    gci.__dict__.update(myci.__dict__)
    gci._scf = mf
    gci.mo_coeff = mf.mo_coeff
    gci.mo_occ = mf.mo_occ
    if isinstance(myci.frozen, (int, np.integer)):
        # Each frozen spatial orbital corresponds to two spin orbitals.
        gci.frozen = myci.frozen * 2
    else:
        # An explicit frozen-orbital list would need an index remapping.
        raise NotImplementedError
    # Re-express the restricted CISD vector in the spin-orbital basis.
    gci.ci = gcisd.from_rcisdvec(myci.ci, myci.nocc, mf.mo_coeff.orbspin)
    return gci
| apache-2.0 |
nilo916/3d_test | test/js/doob/utils/exporters/blender/modules/msgpack/fallback.py | 641 | 26403 | """Fallback pure Python implementation of msgpack"""
import sys
import array
import struct
# Python 2/3 compatibility shims: normalize the names used by the rest of
# this module so the unpacker/packer code can stay version-agnostic.
if sys.version_info[0] == 3:
    PY3 = True
    int_types = int         # isinstance() target for integer values
    Unicode = str           # the text type on this interpreter
    xrange = range

    def dict_iteritems(d):
        # (key, value) iteration matching Python 2's dict.iteritems().
        return d.items()
else:
    PY3 = False
    int_types = (int, long)
    Unicode = unicode

    def dict_iteritems(d):
        return d.iteritems()
if hasattr(sys, 'pypy_version_info'):
    # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
    # StringBuilder is fastest.
    from __pypy__ import newlist_hint
    from __pypy__.builders import StringBuilder
    USING_STRINGBUILDER = True

    class StringIO(object):
        # Minimal write-only buffer backed by PyPy's StringBuilder;
        # only the methods the Packer actually uses are provided.
        def __init__(self, s=b''):
            if s:
                # Pre-size the builder when initial content is supplied.
                self.builder = StringBuilder(len(s))
                self.builder.append(s)
            else:
                self.builder = StringBuilder()

        def write(self, s):
            self.builder.append(s)

        def getvalue(self):
            return self.builder.build()
else:
    USING_STRINGBUILDER = False
    # On CPython a plain bytes buffer is fast enough.
    from io import BytesIO as StringIO
    newlist_hint = lambda size: []  # size hint is a no-op off PyPy
from msgpack.exceptions import (
BufferFull,
OutOfData,
UnpackValueError,
PackValueError,
ExtraData)
from msgpack import ExtType
# Execution modes for Unpacker._fb_unpack(): skip the next object, fully
# construct it, or read only an array/map header.
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3

# Internal tags describing what kind of value a header byte announced.
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5

# Maximum container nesting depth accepted while packing/unpacking.
DEFAULT_RECURSE_LIMIT = 511
def unpack(stream, **kwargs):
    """
    Deserialize one object from the file-like `stream`.

    Raises `ExtraData` when `stream` holds bytes beyond the first object.
    See :class:`Unpacker` for the supported keyword options.
    """
    u = Unpacker(stream, **kwargs)
    obj = u._fb_unpack()
    if u._fb_got_extradata():
        raise ExtraData(obj, u._fb_get_extradata())
    return obj
def unpackb(packed, **kwargs):
    """
    Deserialize one object from the bytes-like `packed`.

    Raises `ExtraData` when `packed` contains trailing bytes.
    See :class:`Unpacker` for the supported keyword options.
    """
    u = Unpacker(None, **kwargs)
    u.feed(packed)
    try:
        obj = u._fb_unpack()
    except OutOfData:
        raise UnpackValueError("Data is not enough.")
    if u._fb_got_extradata():
        raise ExtraData(obj, u._fb_get_extradata())
    return obj
class Unpacker(object):
    """
    Streaming unpacker.

    `file_like` is a file-like object having a `.read(n)` method.
    When `Unpacker` is initialized with a `file_like`, `.feed()` is not
    usable.

    `read_size` is used for `file_like.read(read_size)`.

    If `use_list` is True (default), msgpack lists are deserialized to Python
    lists. Otherwise they are deserialized to tuples.

    `object_hook` is the same as in simplejson. If it is not None, it should
    be callable and Unpacker calls it with a dict argument after deserializing
    a map.

    `object_pairs_hook` is the same as in simplejson. If it is not None, it
    should be callable and Unpacker calls it with a list of key-value pairs
    after deserializing a map.

    `ext_hook` is callback for ext (User defined) type. It called with two
    arguments: (code, bytes). default: `msgpack.ExtType`

    `encoding` is the encoding used for decoding msgpack bytes. If it is
    None (default), msgpack bytes are deserialized to Python bytes.

    `unicode_errors` is used for decoding bytes.

    `max_buffer_size` limits the buffer size. 0 means INT_MAX (default).
    Raises `BufferFull` exception when it is insufficient.
    You should set this parameter when unpacking data from an untrusted source.

    example of streaming deserialization from file-like object::

        unpacker = Unpacker(file_like)
        for o in unpacker:
            do_something(o)

    example of streaming deserialization from socket::

        unpacker = Unpacker()
        while 1:
            buf = sock.recv(1024*2)
            if not buf:
                break
            unpacker.feed(buf)
            for o in unpacker:
                do_something(o)
    """

    def __init__(self, file_like=None, read_size=0, use_list=True,
                 object_hook=None, object_pairs_hook=None, list_hook=None,
                 encoding=None, unicode_errors='strict', max_buffer_size=0,
                 ext_hook=ExtType):
        if file_like is None:
            # Buffer-fed mode: the caller pushes bytes in via feed().
            self._fb_feeding = True
        else:
            if not callable(file_like.read):
                raise TypeError("`file_like.read` must be callable")
            self.file_like = file_like
            self._fb_feeding = False
        # _fb_buffers: list of byte chunks not yet fully consumed.
        # _fb_buf_i:   index of the chunk currently being read.
        # _fb_buf_o:   offset of the next unread byte inside that chunk.
        # _fb_buf_n:   total buffered byte count (for max_buffer_size).
        self._fb_buffers = []
        self._fb_buf_o = 0
        self._fb_buf_i = 0
        self._fb_buf_n = 0
        self._max_buffer_size = max_buffer_size or 2**31-1
        if read_size > self._max_buffer_size:
            raise ValueError("read_size must be smaller than max_buffer_size")
        self._read_size = read_size or min(self._max_buffer_size, 2048)
        self._encoding = encoding
        self._unicode_errors = unicode_errors
        self._use_list = use_list
        self._list_hook = list_hook
        self._object_hook = object_hook
        self._object_pairs_hook = object_pairs_hook
        self._ext_hook = ext_hook
        # Validate hooks up-front so failures surface at construction time.
        if list_hook is not None and not callable(list_hook):
            raise TypeError('`list_hook` is not callable')
        if object_hook is not None and not callable(object_hook):
            raise TypeError('`object_hook` is not callable')
        if object_pairs_hook is not None and not callable(object_pairs_hook):
            raise TypeError('`object_pairs_hook` is not callable')
        if object_hook is not None and object_pairs_hook is not None:
            raise TypeError("object_pairs_hook and object_hook are mutually "
                            "exclusive")
        if not callable(ext_hook):
            raise TypeError("`ext_hook` is not callable")

    def feed(self, next_bytes):
        """Append raw msgpack bytes to the buffer (buffer-fed mode only)."""
        if isinstance(next_bytes, array.array):
            next_bytes = next_bytes.tostring()
        elif isinstance(next_bytes, bytearray):
            next_bytes = bytes(next_bytes)
        # feed() is only legal when no file_like was given at construction.
        assert self._fb_feeding
        if self._fb_buf_n + len(next_bytes) > self._max_buffer_size:
            raise BufferFull
        self._fb_buf_n += len(next_bytes)
        self._fb_buffers.append(next_bytes)

    def _fb_consume(self):
        """Discard everything read so far and reset the read position."""
        self._fb_buffers = self._fb_buffers[self._fb_buf_i:]
        if self._fb_buffers:
            self._fb_buffers[0] = self._fb_buffers[0][self._fb_buf_o:]
        self._fb_buf_o = 0
        self._fb_buf_i = 0
        self._fb_buf_n = sum(map(len, self._fb_buffers))

    def _fb_got_extradata(self):
        """Return True if unread bytes remain after the unpacked object."""
        if self._fb_buf_i != len(self._fb_buffers):
            return True
        if self._fb_feeding:
            return False
        if not self.file_like:
            return False
        # NOTE: this consumes one byte from file_like to probe for data.
        if self.file_like.read(1):
            return True
        return False

    def __iter__(self):
        return self

    def read_bytes(self, n):
        """Read ``n`` raw bytes from the stream, bypassing msgpack decoding."""
        return self._fb_read(n)

    def _fb_rollback(self):
        """Rewind to the start of the retained buffers (used on OutOfData)."""
        self._fb_buf_i = 0
        self._fb_buf_o = 0

    def _fb_get_extradata(self):
        """Return all remaining unread bytes joined into one bytes object."""
        bufs = self._fb_buffers[self._fb_buf_i:]
        if bufs:
            bufs[0] = bufs[0][self._fb_buf_o:]
        return b''.join(bufs)

    def _fb_read(self, n, write_bytes=None):
        """
        Return exactly ``n`` bytes, pulling from the buffers and, in
        file-fed mode, from ``file_like``. Rolls back and raises OutOfData
        when fewer than ``n`` bytes are available. ``write_bytes``, when
        given, is called with every byte consumed (tee-style).
        """
        buffs = self._fb_buffers
        # Fast path: the whole request fits inside the current chunk.
        if (write_bytes is None and self._fb_buf_i < len(buffs) and
                self._fb_buf_o + n < len(buffs[self._fb_buf_i])):
            self._fb_buf_o += n
            return buffs[self._fb_buf_i][self._fb_buf_o - n:self._fb_buf_o]
        ret = b''
        while len(ret) != n:
            if self._fb_buf_i == len(buffs):
                if self._fb_feeding:
                    # No more chunks and nobody to ask: incomplete read.
                    break
                tmp = self.file_like.read(self._read_size)
                if not tmp:
                    break
                buffs.append(tmp)
                continue
            sliced = n - len(ret)
            ret += buffs[self._fb_buf_i][self._fb_buf_o:self._fb_buf_o + sliced]
            self._fb_buf_o += sliced
            if self._fb_buf_o >= len(buffs[self._fb_buf_i]):
                # Current chunk exhausted; move to the next one.
                self._fb_buf_o = 0
                self._fb_buf_i += 1
        if len(ret) != n:
            self._fb_rollback()
            raise OutOfData
        if write_bytes is not None:
            write_bytes(ret)
        return ret

    def _read_header(self, execute=EX_CONSTRUCT, write_bytes=None):
        """
        Read one msgpack format byte plus its length/payload fields and
        return a ``(type_tag, length, immediate_object)`` triple.
        """
        typ = TYPE_IMMEDIATE
        n = 0
        obj = None
        c = self._fb_read(1, write_bytes)
        b = ord(c)
        if b & 0b10000000 == 0:  # positive fixint
            obj = b
        elif b & 0b11100000 == 0b11100000:  # negative fixint
            obj = struct.unpack("b", c)[0]
        elif b & 0b11100000 == 0b10100000:  # fixstr
            n = b & 0b00011111
            obj = self._fb_read(n, write_bytes)
            typ = TYPE_RAW
        elif b & 0b11110000 == 0b10010000:  # fixarray
            n = b & 0b00001111
            typ = TYPE_ARRAY
        elif b & 0b11110000 == 0b10000000:  # fixmap
            n = b & 0b00001111
            typ = TYPE_MAP
        elif b == 0xc0:  # nil
            obj = None
        elif b == 0xc2:  # false
            obj = False
        elif b == 0xc3:  # true
            obj = True
        elif b == 0xc4:  # bin 8
            typ = TYPE_BIN
            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xc5:  # bin 16
            typ = TYPE_BIN
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xc6:  # bin 32
            typ = TYPE_BIN
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xc7:  # ext 8
            typ = TYPE_EXT
            L, n = struct.unpack('Bb', self._fb_read(2, write_bytes))
            obj = self._fb_read(L, write_bytes)
        elif b == 0xc8:  # ext 16
            typ = TYPE_EXT
            L, n = struct.unpack('>Hb', self._fb_read(3, write_bytes))
            obj = self._fb_read(L, write_bytes)
        elif b == 0xc9:  # ext 32
            typ = TYPE_EXT
            L, n = struct.unpack('>Ib', self._fb_read(5, write_bytes))
            obj = self._fb_read(L, write_bytes)
        elif b == 0xca:  # float 32
            obj = struct.unpack(">f", self._fb_read(4, write_bytes))[0]
        elif b == 0xcb:  # float 64
            obj = struct.unpack(">d", self._fb_read(8, write_bytes))[0]
        elif b == 0xcc:  # uint 8
            obj = struct.unpack("B", self._fb_read(1, write_bytes))[0]
        elif b == 0xcd:  # uint 16
            obj = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
        elif b == 0xce:  # uint 32
            obj = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
        elif b == 0xcf:  # uint 64
            obj = struct.unpack(">Q", self._fb_read(8, write_bytes))[0]
        elif b == 0xd0:  # int 8
            obj = struct.unpack("b", self._fb_read(1, write_bytes))[0]
        elif b == 0xd1:  # int 16
            obj = struct.unpack(">h", self._fb_read(2, write_bytes))[0]
        elif b == 0xd2:  # int 32
            obj = struct.unpack(">i", self._fb_read(4, write_bytes))[0]
        elif b == 0xd3:  # int 64
            obj = struct.unpack(">q", self._fb_read(8, write_bytes))[0]
        elif b == 0xd4:  # fixext 1
            typ = TYPE_EXT
            n, obj = struct.unpack('b1s', self._fb_read(2, write_bytes))
        elif b == 0xd5:  # fixext 2
            typ = TYPE_EXT
            n, obj = struct.unpack('b2s', self._fb_read(3, write_bytes))
        elif b == 0xd6:  # fixext 4
            typ = TYPE_EXT
            n, obj = struct.unpack('b4s', self._fb_read(5, write_bytes))
        elif b == 0xd7:  # fixext 8
            typ = TYPE_EXT
            n, obj = struct.unpack('b8s', self._fb_read(9, write_bytes))
        elif b == 0xd8:  # fixext 16
            typ = TYPE_EXT
            n, obj = struct.unpack('b16s', self._fb_read(17, write_bytes))
        elif b == 0xd9:  # str 8
            typ = TYPE_RAW
            n = struct.unpack("B", self._fb_read(1, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xda:  # str 16
            typ = TYPE_RAW
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xdb:  # str 32
            typ = TYPE_RAW
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            obj = self._fb_read(n, write_bytes)
        elif b == 0xdc:  # array 16
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            typ = TYPE_ARRAY
        elif b == 0xdd:  # array 32
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            typ = TYPE_ARRAY
        elif b == 0xde:  # map 16
            n = struct.unpack(">H", self._fb_read(2, write_bytes))[0]
            typ = TYPE_MAP
        elif b == 0xdf:  # map 32
            n = struct.unpack(">I", self._fb_read(4, write_bytes))[0]
            typ = TYPE_MAP
        else:
            raise UnpackValueError("Unknown header: 0x%x" % b)
        return typ, n, obj

    def _fb_unpack(self, execute=EX_CONSTRUCT, write_bytes=None):
        """
        Unpack one complete object (recursing for containers), honouring
        the ``execute`` mode: construct, skip, or header-only reads.
        """
        typ, n, obj = self._read_header(execute, write_bytes)

        if execute == EX_READ_ARRAY_HEADER:
            if typ != TYPE_ARRAY:
                raise UnpackValueError("Expected array")
            return n
        if execute == EX_READ_MAP_HEADER:
            if typ != TYPE_MAP:
                raise UnpackValueError("Expected map")
            return n
        # TODO should we eliminate the recursion?
        if typ == TYPE_ARRAY:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call `list_hook`
                    self._fb_unpack(EX_SKIP, write_bytes)
                return
            ret = newlist_hint(n)
            for i in xrange(n):
                ret.append(self._fb_unpack(EX_CONSTRUCT, write_bytes))
            if self._list_hook is not None:
                ret = self._list_hook(ret)
            # TODO is the interaction between `list_hook` and `use_list` ok?
            return ret if self._use_list else tuple(ret)
        if typ == TYPE_MAP:
            if execute == EX_SKIP:
                for i in xrange(n):
                    # TODO check whether we need to call hooks
                    self._fb_unpack(EX_SKIP, write_bytes)
                    self._fb_unpack(EX_SKIP, write_bytes)
                return
            if self._object_pairs_hook is not None:
                ret = self._object_pairs_hook(
                    (self._fb_unpack(EX_CONSTRUCT, write_bytes),
                     self._fb_unpack(EX_CONSTRUCT, write_bytes))
                    for _ in xrange(n))
            else:
                ret = {}
                for _ in xrange(n):
                    key = self._fb_unpack(EX_CONSTRUCT, write_bytes)
                    ret[key] = self._fb_unpack(EX_CONSTRUCT, write_bytes)
                if self._object_hook is not None:
                    ret = self._object_hook(ret)
            return ret
        if execute == EX_SKIP:
            return
        if typ == TYPE_RAW:
            # str family: decode only when an encoding was configured.
            if self._encoding is not None:
                obj = obj.decode(self._encoding, self._unicode_errors)
            return obj
        if typ == TYPE_EXT:
            return self._ext_hook(n, obj)
        if typ == TYPE_BIN:
            return obj
        assert typ == TYPE_IMMEDIATE
        return obj

    def next(self):
        """Iterator protocol (Python 2 spelling): unpack the next object."""
        try:
            ret = self._fb_unpack(EX_CONSTRUCT, None)
            self._fb_consume()
            return ret
        except OutOfData:
            raise StopIteration
    __next__ = next

    def skip(self, write_bytes=None):
        """Skip over the next object without constructing it."""
        self._fb_unpack(EX_SKIP, write_bytes)
        self._fb_consume()

    def unpack(self, write_bytes=None):
        """Unpack and return the next object."""
        ret = self._fb_unpack(EX_CONSTRUCT, write_bytes)
        self._fb_consume()
        return ret

    def read_array_header(self, write_bytes=None):
        """Read an array header and return its element count."""
        ret = self._fb_unpack(EX_READ_ARRAY_HEADER, write_bytes)
        self._fb_consume()
        return ret

    def read_map_header(self, write_bytes=None):
        """Read a map header and return its pair count."""
        ret = self._fb_unpack(EX_READ_MAP_HEADER, write_bytes)
        self._fb_consume()
        return ret
class Packer(object):
    """
    MessagePack Packer

    usage:

        packer = Packer()
        astream.write(packer.pack(a))
        astream.write(packer.pack(b))

    Packer's constructor has some keyword arguments:

    :param callable default:
        Convert user type to builtin type that Packer supports.
        See also simplejson's document.
    :param str encoding:
        Convert unicode to bytes with this encoding. (default: 'utf-8')
    :param str unicode_errors:
        Error handler for encoding unicode. (default: 'strict')
    :param bool use_single_float:
        Use single precision float type for float. (default: False)
    :param bool autoreset:
        Reset buffer after each pack and return it's content as `bytes`. (default: True).
        If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.
    :param bool use_bin_type:
        Use bin type introduced in msgpack spec 2.0 for bytes.
        It also enable str8 type for unicode.
    """

    def __init__(self, default=None, encoding='utf-8', unicode_errors='strict',
                 use_single_float=False, autoreset=True, use_bin_type=False):
        self._use_float = use_single_float
        self._autoreset = autoreset
        self._use_bin_type = use_bin_type
        self._encoding = encoding
        self._unicode_errors = unicode_errors
        self._buffer = StringIO()
        if default is not None:
            if not callable(default):
                raise TypeError("default must be callable")
        # BUG FIX: assign unconditionally.  Previously this assignment sat
        # inside the ``if default is not None`` branch, so a plain Packer()
        # had no ``_default`` attribute at all and _pack() raised
        # AttributeError (instead of TypeError) on unserializable objects,
        # because _pack() evaluates ``self._default is not None``.
        self._default = default

    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT, isinstance=isinstance):
        """Serialize ``obj`` into the internal buffer in msgpack wire format.

        The ``while True`` loop exists so the user ``default`` hook can
        convert an unsupported object once and the conversion result is
        re-dispatched through all the type checks below.
        """
        default_used = False
        while True:
            if nest_limit < 0:
                raise PackValueError("recursion limit exceeded")
            if obj is None:
                return self._buffer.write(b"\xc0")      # nil
            if isinstance(obj, bool):
                if obj:
                    return self._buffer.write(b"\xc3")  # true
                return self._buffer.write(b"\xc2")      # false
            if isinstance(obj, int_types):
                # Choose the smallest msgpack integer encoding that fits.
                if 0 <= obj < 0x80:
                    return self._buffer.write(struct.pack("B", obj))          # positive fixint
                if -0x20 <= obj < 0:
                    return self._buffer.write(struct.pack("b", obj))          # negative fixint
                if 0x80 <= obj <= 0xff:
                    return self._buffer.write(struct.pack("BB", 0xcc, obj))   # uint8
                if -0x80 <= obj < 0:
                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))  # int8
                if 0xff < obj <= 0xffff:
                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))  # uint16
                if -0x8000 <= obj < -0x80:
                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))  # int16
                if 0xffff < obj <= 0xffffffff:
                    return self._buffer.write(struct.pack(">BI", 0xce, obj))  # uint32
                if -0x80000000 <= obj < -0x8000:
                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))  # int32
                if 0xffffffff < obj <= 0xffffffffffffffff:
                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))  # uint64
                if -0x8000000000000000 <= obj < -0x80000000:
                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))  # int64
                raise PackValueError("Integer value out of range")
            if self._use_bin_type and isinstance(obj, bytes):
                # bin8/bin16/bin32 headers (msgpack spec 2.0); only emitted
                # when use_bin_type is enabled, otherwise bytes fall through
                # to the raw/str branch below for backward compatibility.
                n = len(obj)
                if n <= 0xff:
                    self._buffer.write(struct.pack('>BB', 0xc4, n))
                elif n <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xc5, n))
                elif n <= 0xffffffff:
                    self._buffer.write(struct.pack(">BI", 0xc6, n))
                else:
                    raise PackValueError("Bytes is too large")
                return self._buffer.write(obj)
            if isinstance(obj, (Unicode, bytes)):
                if isinstance(obj, Unicode):
                    if self._encoding is None:
                        raise TypeError(
                            "Can't encode unicode string: "
                            "no encoding is specified")
                    obj = obj.encode(self._encoding, self._unicode_errors)
                n = len(obj)
                # fixstr / str8 (2.0 only) / str16 / str32 headers.
                if n <= 0x1f:
                    self._buffer.write(struct.pack('B', 0xa0 + n))
                elif self._use_bin_type and n <= 0xff:
                    self._buffer.write(struct.pack('>BB', 0xd9, n))
                elif n <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xda, n))
                elif n <= 0xffffffff:
                    self._buffer.write(struct.pack(">BI", 0xdb, n))
                else:
                    raise PackValueError("String is too large")
                return self._buffer.write(obj)
            if isinstance(obj, float):
                if self._use_float:
                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))  # float32
                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))      # float64
            if isinstance(obj, ExtType):
                code = obj.code
                data = obj.data
                assert isinstance(code, int)
                assert isinstance(data, bytes)
                L = len(data)
                # fixext 1/2/4/8/16 for the common sizes, ext8/16/32 otherwise.
                if L == 1:
                    self._buffer.write(b'\xd4')
                elif L == 2:
                    self._buffer.write(b'\xd5')
                elif L == 4:
                    self._buffer.write(b'\xd6')
                elif L == 8:
                    self._buffer.write(b'\xd7')
                elif L == 16:
                    self._buffer.write(b'\xd8')
                elif L <= 0xff:
                    self._buffer.write(struct.pack(">BB", 0xc7, L))
                elif L <= 0xffff:
                    self._buffer.write(struct.pack(">BH", 0xc8, L))
                else:
                    self._buffer.write(struct.pack(">BI", 0xc9, L))
                self._buffer.write(struct.pack("b", code))
                self._buffer.write(data)
                return
            if isinstance(obj, (list, tuple)):
                n = len(obj)
                self._fb_pack_array_header(n)
                for i in xrange(n):
                    self._pack(obj[i], nest_limit - 1)
                return
            if isinstance(obj, dict):
                return self._fb_pack_map_pairs(len(obj), dict_iteritems(obj),
                                               nest_limit - 1)
            if not default_used and self._default is not None:
                # Let the user hook convert the object, then retry once.
                obj = self._default(obj)
                default_used = 1
                continue
            raise TypeError("Cannot serialize %r" % obj)

    def pack(self, obj):
        """Pack ``obj`` and return the buffer content (resets if autoreset)."""
        self._pack(obj)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret

    def pack_map_pairs(self, pairs):
        """Pack an iterable of (key, value) pairs as a msgpack map."""
        self._fb_pack_map_pairs(len(pairs), pairs)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret

    def pack_array_header(self, n):
        """Pack only an array header for ``n`` elements (n < 2**32)."""
        if n >= 2**32:
            raise ValueError
        self._fb_pack_array_header(n)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret

    def pack_map_header(self, n):
        """Pack only a map header for ``n`` pairs (n < 2**32)."""
        if n >= 2**32:
            raise ValueError
        self._fb_pack_map_header(n)
        ret = self._buffer.getvalue()
        if self._autoreset:
            self._buffer = StringIO()
        elif USING_STRINGBUILDER:
            self._buffer = StringIO(ret)
        return ret

    def pack_ext_type(self, typecode, data):
        """Pack an application-defined ext value with the given typecode."""
        if not isinstance(typecode, int):
            raise TypeError("typecode must have int type.")
        if not 0 <= typecode <= 127:
            raise ValueError("typecode should be 0-127")
        if not isinstance(data, bytes):
            raise TypeError("data must have bytes type")
        L = len(data)
        if L > 0xffffffff:
            raise ValueError("Too large data")
        if L == 1:
            self._buffer.write(b'\xd4')
        elif L == 2:
            self._buffer.write(b'\xd5')
        elif L == 4:
            self._buffer.write(b'\xd6')
        elif L == 8:
            self._buffer.write(b'\xd7')
        elif L == 16:
            self._buffer.write(b'\xd8')
        elif L <= 0xff:
            self._buffer.write(b'\xc7' + struct.pack('B', L))
        elif L <= 0xffff:
            self._buffer.write(b'\xc8' + struct.pack('>H', L))
        else:
            self._buffer.write(b'\xc9' + struct.pack('>I', L))
        self._buffer.write(struct.pack('B', typecode))
        self._buffer.write(data)

    def _fb_pack_array_header(self, n):
        """Write a fixarray/array16/array32 header for ``n`` elements."""
        if n <= 0x0f:
            return self._buffer.write(struct.pack('B', 0x90 + n))
        if n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xdc, n))
        if n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xdd, n))
        raise PackValueError("Array is too large")

    def _fb_pack_map_header(self, n):
        """Write a fixmap/map16/map32 header for ``n`` pairs."""
        if n <= 0x0f:
            return self._buffer.write(struct.pack('B', 0x80 + n))
        if n <= 0xffff:
            return self._buffer.write(struct.pack(">BH", 0xde, n))
        if n <= 0xffffffff:
            return self._buffer.write(struct.pack(">BI", 0xdf, n))
        raise PackValueError("Dict is too large")

    def _fb_pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
        """Write a map header followed by ``n`` packed key/value pairs."""
        self._fb_pack_map_header(n)
        for (k, v) in pairs:
            self._pack(k, nest_limit - 1)
            self._pack(v, nest_limit - 1)

    def bytes(self):
        """Return the current buffer content without resetting it."""
        return self._buffer.getvalue()

    def reset(self):
        """Discard any buffered output."""
        self._buffer = StringIO()
| mit |
KohlsTechnology/ansible | lib/ansible/playbook/role/requirement.py | 19 | 8225 | # (c) 2014 Michael DeHaan, <michael@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import tempfile
import tarfile
from subprocess import Popen, PIPE
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.module_utils.six import string_types
from ansible.playbook.role.definition import RoleDefinition
__all__ = ['RoleRequirement']
# Keys that are allowed to remain in a parsed role spec dict; everything
# else is stripped by RoleRequirement.role_yaml_parse().
VALID_SPEC_KEYS = [
    'name',
    'role',
    'scm',
    'src',
    'version',
]
# Reuse the CLI's global Display instance when running under the ansible
# entry point; otherwise fall back to a locally constructed one.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class RoleRequirement(RoleDefinition):

    """
    Helper class for Galaxy, which is used to parse both dependencies
    specified in meta/main.yml and requirements.yml files.
    """

    def __init__(self):
        pass

    @staticmethod
    def repo_url_to_role_name(repo_url):
        """Derive a role name from a repo URL.

        e.g. "http://git.example.com/repos/repo.git" => "repo"
        """
        # A bare name (no scheme, no user@host) is already a role name.
        if '://' not in repo_url and '@' not in repo_url:
            return repo_url
        trailing_path = repo_url.split('/')[-1]
        if trailing_path.endswith('.git'):
            trailing_path = trailing_path[:-4]
        if trailing_path.endswith('.tar.gz'):
            trailing_path = trailing_path[:-7]
        # Drop any ",version" suffix from the old comma-separated format.
        if ',' in trailing_path:
            trailing_path = trailing_path.split(',')[0]
        return trailing_path

    @staticmethod
    def role_spec_parse(role_spec):
        """Parse the deprecated comma-separated role spec format.

        Takes a string like:

            git+http://git.example.com/repos/repo.git,v1.0

        and returns a dict of properties such as:

            {
                'scm': 'git',
                'src': 'http://git.example.com/repos/repo.git',
                'version': 'v1.0',
                'name': 'repo'
            }
        """
        display.deprecated("The comma separated role spec format, use the yaml/explicit format instead. Line that trigger this: %s" % role_spec,
                           version="2.7")

        default_role_versions = dict(git='master', hg='tip')

        role_spec = role_spec.strip()
        role_version = ''
        if role_spec == "" or role_spec.startswith("#"):
            return (None, None, None, None)

        tokens = [s.strip() for s in role_spec.split(',')]

        # assume https://github.com URLs are git+https:// URLs and not
        # tarballs unless they end in '.zip'
        if 'github.com/' in tokens[0] and not tokens[0].startswith("git+") and not tokens[0].endswith('.tar.gz'):
            tokens[0] = 'git+' + tokens[0]

        if '+' in tokens[0]:
            (scm, role_url) = tokens[0].split('+')
        else:
            scm = None
            role_url = tokens[0]

        if len(tokens) >= 2:
            role_version = tokens[1]

        if len(tokens) == 3:
            role_name = tokens[2]
        else:
            role_name = RoleRequirement.repo_url_to_role_name(tokens[0])

        if scm and not role_version:
            role_version = default_role_versions.get(scm, '')

        return dict(scm=scm, src=role_url, version=role_version, name=role_name)

    @staticmethod
    def role_yaml_parse(role):
        """Normalize a requirements.yml entry (str or dict) into a spec dict
        containing only the keys in VALID_SPEC_KEYS."""

        if isinstance(role, string_types):
            name = None
            scm = None
            src = None
            version = None
            if ',' in role:
                if role.count(',') == 1:
                    (src, version) = role.strip().split(',', 1)
                elif role.count(',') == 2:
                    (src, version, name) = role.strip().split(',', 2)
                else:
                    raise AnsibleError("Invalid role line (%s). Proper format is 'role_name[,version[,name]]'" % role)
            else:
                src = role

            if name is None:
                name = RoleRequirement.repo_url_to_role_name(src)
            if '+' in src:
                (scm, src) = src.split('+', 1)
            return dict(name=name, src=src, scm=scm, version=version)

        if 'role' in role:
            name = role['role']
            if ',' in name:
                # Old style: {role: "galaxy.role,version,name", other_vars: "here" }
                role = RoleRequirement.role_spec_parse(role['role'])
            else:
                del role['role']
                role['name'] = name
        else:
            role = role.copy()

            if 'src' in role:
                # New style: { src: 'galaxy.role,version,name', other_vars: "here" }
                if 'github.com' in role["src"] and 'http' in role["src"] and '+' not in role["src"] and not role["src"].endswith('.tar.gz'):
                    role["src"] = "git+" + role["src"]

                if '+' in role["src"]:
                    (scm, src) = role["src"].split('+')
                    role["scm"] = scm
                    role["src"] = src

                if 'name' not in role:
                    role["name"] = RoleRequirement.repo_url_to_role_name(role["src"])

            if 'version' not in role:
                role['version'] = ''

            if 'scm' not in role:
                role['scm'] = None

        for key in list(role.keys()):
            if key not in VALID_SPEC_KEYS:
                role.pop(key)

        return role

    @staticmethod
    def scm_archive_role(src, scm='git', name=None, version='HEAD', keep_scm_meta=False):
        """Clone a role from git/hg and archive it to a temporary tar file.

        Returns the path of the tar file; the caller owns (and must remove)
        both the tempdir clone and the archive.
        """

        def run_scm_cmd(cmd, tempdir):
            # Pre-bind the output variables: if Popen() itself raises (e.g.
            # git/hg is not installed), the except block below would
            # otherwise fail with NameError on stdout/stderr and mask the
            # real error.
            stdout = b''
            stderr = b''
            try:
                popen = Popen(cmd, cwd=tempdir, stdout=PIPE, stderr=PIPE)
                stdout, stderr = popen.communicate()
            except Exception as e:
                ran = " ".join(cmd)
                display.debug("ran %s:" % ran)
                # communicate() returns bytes on Python 3; to_native avoids
                # a str+bytes TypeError here.
                display.debug("\tstdout: " + to_native(stdout))
                display.debug("\tstderr: " + to_native(stderr))
                raise AnsibleError("when executing %s: %s" % (ran, to_native(e)))
            if popen.returncode != 0:
                raise AnsibleError("- command %s failed in directory %s (rc=%s)" % (' '.join(cmd), tempdir, popen.returncode))

        if scm not in ['hg', 'git']:
            raise AnsibleError("- scm %s is not currently supported" % scm)

        tempdir = tempfile.mkdtemp(dir=C.DEFAULT_LOCAL_TMP)
        clone_cmd = [scm, 'clone', src, name]
        run_scm_cmd(clone_cmd, tempdir)

        if scm == 'git' and version:
            checkout_cmd = [scm, 'checkout', version]
            run_scm_cmd(checkout_cmd, os.path.join(tempdir, name))

        temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.tar', dir=C.DEFAULT_LOCAL_TMP)
        archive_cmd = None
        if keep_scm_meta:
            # Tar the working copy directly so .git/.hg metadata survives.
            display.vvv('tarring %s from %s to %s' % (name, tempdir, temp_file.name))
            with tarfile.open(temp_file.name, "w") as tar:
                tar.add(os.path.join(tempdir, name), arcname=name)
        elif scm == 'hg':
            archive_cmd = ['hg', 'archive', '--prefix', "%s/" % name]
            if version:
                archive_cmd.extend(['-r', version])
            archive_cmd.append(temp_file.name)
        elif scm == 'git':
            archive_cmd = ['git', 'archive', '--prefix=%s/' % name, '--output=%s' % temp_file.name]
            if version:
                archive_cmd.append(version)
            else:
                archive_cmd.append('HEAD')

        if archive_cmd is not None:
            display.vvv('archiving %s' % archive_cmd)
            run_scm_cmd(archive_cmd, os.path.join(tempdir, name))

        return temp_file.name
| gpl-3.0 |
defionscode/ansible | test/units/modules/network/dellos9/test_dellos9_facts.py | 56 | 4619 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from units.modules.utils import set_module_args
from .dellos9_module import TestDellos9Module, load_fixture
from ansible.modules.network.dellos9 import dellos9_facts
class TestDellos9Facts(TestDellos9Module):
    """Unit tests for the dellos9_facts module's gather_subset behavior.

    ``run_commands`` is patched so each requested CLI command is answered
    from an on-disk fixture instead of a real device.
    """

    module = dellos9_facts

    def setUp(self):
        super(TestDellos9Facts, self).setUp()

        self.mock_run_command = patch(
            'ansible.modules.network.dellos9.dellos9_facts.run_commands')
        self.run_command = self.mock_run_command.start()

    def tearDown(self):
        super(TestDellos9Facts, self).tearDown()

        self.mock_run_command.stop()

    def load_fixtures(self, commands=None):
        """Route every mocked command to a fixture file named after it."""

        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()

            for item in commands:
                # Commands may arrive as plain strings or JSON objects with
                # a 'command' key.
                try:
                    obj = json.loads(item)
                    command = obj['command']
                except ValueError:
                    command = item
                # Sanitize characters that cannot appear in fixture names.
                if '|' in command:
                    command = str(command).replace('|', '')
                filename = str(command).replace(' ', '_')
                filename = filename.replace('/', '7')
                output.append(load_fixture(filename))
            return output

        self.run_command.side_effect = load_from_file

    def test_dellos9_facts_gather_subset_default(self):
        set_module_args(dict())
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        # assertEqual (not the deprecated assertEquals alias) throughout.
        self.assertEqual('dellos9_sw1', ansible_facts['ansible_net_hostname'])
        self.assertIn('fortyGigE 0/24', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual(3128820, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(3125722, ansible_facts['ansible_net_memfree_mb'])

    def test_dellos9_facts_gather_subset_config(self):
        set_module_args({'gather_subset': 'config'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('dellos9_sw1', ansible_facts['ansible_net_hostname'])
        self.assertIn('ansible_net_config', ansible_facts)

    def test_dellos9_facts_gather_subset_hardware(self):
        set_module_args({'gather_subset': 'hardware'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual(['flash', 'fcmfs', 'nfsmount', 'ftp', 'tftp', 'scp', 'http', 'https'], ansible_facts['ansible_net_filesystems'])
        self.assertEqual(3128820, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(3125722, ansible_facts['ansible_net_memfree_mb'])

    def test_dellos9_facts_gather_subset_interfaces(self):
        set_module_args({'gather_subset': 'interfaces'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('fortyGigE 0/24', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual(['Ma 0/0'], list(ansible_facts['ansible_net_neighbors'].keys()))
        self.assertIn('ansible_net_interfaces', ansible_facts)
| gpl-3.0 |
LCAS/zoidbot | circle_detection/templates/generate_circle.py | 4 | 2016 | import cv2
from fpdf import FPDF
import numpy as np
import os
import sys
# Printed diameter of each circle target, in millimetres.
circleDiameter = 50
# How many circle targets to render into the PDF.
numOfCircles = 9
################################################################################
####################### DO NOT EDIT BELOW THIS LINE! ###########################
################################################################################
# How many targets fit across/down an A4 page (210 x 297 mm), leaving a
# diameter/5 margin around each.
# NOTE(review): this relies on Python 2 integer division (210 / 60 == 3);
# under Python 3 these become floats and the layout logic changes.
numberPerRow = (210 / (circleDiameter + (circleDiameter / 5)))
numberPerColum = (297 / (circleDiameter + (circleDiameter / 5)))
def gen_single_circle(outsideCircleRad, insideCircleRad):
    """Render concentric black rings on a white square image.

    Rings of thickness 2 are drawn every 2 px from outsideCircleRad down
    to (but not including) insideCircleRad; a 6 px white border surrounds
    the outermost ring.
    """
    side = (outsideCircleRad * 2) + 12
    canvas = np.full((side, side, 3), 255, np.uint8)
    center = (outsideCircleRad + 6, outsideCircleRad + 6)
    for radius in range(outsideCircleRad, insideCircleRad, -2):
        cv2.circle(canvas, center, radius, (0, 0, 0), 2)
    return canvas
def gen_pdf():
    # Lay the circle targets out on A4 pages, numberPerRow per row, and
    # write the result to BETA.pdf.
    # NOTE(review): Python 2 only -- uses print statements and relies on
    # the integer division in numberPerRow/numberPerColum above.
    pdf = FPDF()
    pdf.add_page()
    xPos = 0
    yPos = 1
    for i in range(1, numOfCircles + 1):
        # Inner radius grows with i, so each successive target has fewer rings.
        increments = 200 / numOfCircles
        # Render to a temporary PNG (FPDF places images from disk) and
        # preview it briefly on screen.
        cv2.imwrite(str(i) + 'circle.png', gen_single_circle(500+100, ((increments * i)+100)))
        cv2.imshow('circle.png', gen_single_circle(500+100, ((increments * i)+100)))
        k = cv2.waitKey(200) & 0xFF
        xPos += 1
        # Page coordinates (mm) for this target's top-left corner.
        x = (xPos * (circleDiameter + (circleDiameter / 5)))-circleDiameter
        y = (yPos * (circleDiameter + (circleDiameter / 5)))
        sys.stdout.write('(' + str(x) + ' , ' + str(y) + ') ')
        pdf.image(str(i) + 'circle.png', x, y, circleDiameter + 3, circleDiameter + 3, 'png')
        os.remove(str(i) + 'circle.png');
        # Row full: wrap to the start of the next row.
        if xPos > numberPerRow-1:
            print ""
            yPos += 1
            xPos = 0
        # Page full: start a new page unless this was the last circle.
        if yPos > numberPerColum:
            if (i != (numOfCircles)):
                pdf.add_page()
                print "-------------------------------------"
                xPos = 0
                yPos = 1
    pdf.output('BETA.pdf', 'F')
gen_pdf()
#cv2.imshow('image', a);
#k = cv2.waitKey(1000) & 0xFF | mit |
kustodian/ansible | test/units/vars/test_module_response_deepcopy.py | 118 | 1473 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ansible.vars.clean import module_response_deepcopy
import pytest
def test_module_response_deepcopy_basic():
    """A plain int deep-copies to an equal value."""
    source = 42
    result = module_response_deepcopy(source)
    assert result == source
def test_module_response_deepcopy_atomic():
    """Immutable atoms are returned by identity rather than copied."""
    atoms = [None, 42, 2**100, 3.14, True, False, 1j,
             "hello", u"hello\u1234"]
    for atom in atoms:
        assert module_response_deepcopy(atom) is atom
def test_module_response_deepcopy_list():
    """Lists are copied recursively: equal values, no shared structure."""
    source = [[1, 2], 3]
    result = module_response_deepcopy(source)
    assert result == source
    assert result is not source
    assert result[0] is not source[0]
def test_module_response_deepcopy_empty_tuple():
    """The empty tuple is returned as-is (it is an immutable singleton)."""
    source = ()
    assert module_response_deepcopy(source) is source
@pytest.mark.skip(reason='No current support for this situation')
def test_module_response_deepcopy_tuple():
    """A tuple holding mutables would need a real copy (not yet supported)."""
    source = ([1, 2], 3)
    result = module_response_deepcopy(source)
    assert result == source
    assert result is not source
    assert result[0] is not source[0]
def test_module_response_deepcopy_tuple_of_immutables():
    """A tuple containing only immutables is returned by identity."""
    source = ((1, 2), 3)
    assert module_response_deepcopy(source) is source
def test_module_response_deepcopy_dict():
    """Dicts are copied recursively: equal values, values not shared."""
    source = {"foo": [1, 2], "bar": 3}
    result = module_response_deepcopy(source)
    assert result == source
    assert result is not source
    assert result["foo"] is not source["foo"]
| gpl-3.0 |
raphaelmerx/django | tests/syndication_tests/urls.py | 216 | 1381 | from django.conf.urls import url
from . import feeds
# URL routes exercised by the syndication framework tests; each path is
# served by one of the Feed subclasses defined in feeds.py.
urlpatterns = [
    url(r'^syndication/rss2/$', feeds.TestRss2Feed()),
    url(r'^syndication/rss2/guid_ispermalink_true/$',
        feeds.TestRss2FeedWithGuidIsPermaLinkTrue()),
    url(r'^syndication/rss2/guid_ispermalink_false/$',
        feeds.TestRss2FeedWithGuidIsPermaLinkFalse()),
    url(r'^syndication/rss091/$', feeds.TestRss091Feed()),
    url(r'^syndication/no_pubdate/$', feeds.TestNoPubdateFeed()),
    url(r'^syndication/atom/$', feeds.TestAtomFeed()),
    url(r'^syndication/latest/$', feeds.TestLatestFeed()),
    url(r'^syndication/custom/$', feeds.TestCustomFeed()),
    url(r'^syndication/naive-dates/$', feeds.NaiveDatesFeed()),
    url(r'^syndication/aware-dates/$', feeds.TZAwareDatesFeed()),
    url(r'^syndication/feedurl/$', feeds.TestFeedUrlFeed()),
    url(r'^syndication/articles/$', feeds.ArticlesFeed()),
    url(r'^syndication/template/$', feeds.TemplateFeed()),
    url(r'^syndication/template_context/$', feeds.TemplateContextFeed()),
    url(r'^syndication/rss2/single-enclosure/$', feeds.TestSingleEnclosureRSSFeed()),
    url(r'^syndication/rss2/multiple-enclosure/$', feeds.TestMultipleEnclosureRSSFeed()),
    url(r'^syndication/atom/single-enclosure/$', feeds.TestSingleEnclosureAtomFeed()),
    url(r'^syndication/atom/multiple-enclosure/$', feeds.TestMultipleEnclosureAtomFeed()),
]
| bsd-3-clause |
gymnasium/edx-platform | lms/djangoapps/support/views/certificate.py | 23 | 1319 | """
Certificate tool in the student support app.
"""
import urllib
from django.utils.decorators import method_decorator
from django.views.generic import View
from edxmako.shortcuts import render_to_response
from support.decorators import require_support_permission
class CertificatesSupportView(View):
    """
    View for viewing and regenerating certificates for users.

    Used by the support team to re-issue certificates when the initial
    generation went wrong, e.g.:

    * The user's name was spelled incorrectly.
    * The user later earned a higher grade and wants it reflected.
    * The user received an honor code certificate because verification
      expired before certs were generated.

    The heavy lifting happens client-side via direct API calls to the
    certificates app; this view only renders the page shell.
    """

    @method_decorator(require_support_permission)
    def get(self, request):
        """Render the certificates support view."""
        raw_user = request.GET.get("user", "")
        # Round-trip through quote_plus/unquote to sanitise the user
        # filter before it reaches the template.
        user_filter = urllib.unquote(urllib.quote_plus(raw_user))
        context = {
            "user_filter": user_filter,
            "course_filter": request.GET.get("course_id", ""),
        }
        return render_to_response("support/certificates.html", context)
| agpl-3.0 |
zhjunlang/kbengine | kbe/src/lib/python/Lib/tkinter/test/support.py | 59 | 3128 | import sys
import tkinter
import unittest
from test.support import requires
class AbstractTkTest:
    """Mixin that gives a test case a private Tk root window.

    The default-root machinery is disabled for the duration of the class
    so tests cannot accidentally depend on an implicit root; the previous
    setting is restored in tearDownClass.
    """

    @classmethod
    def setUpClass(cls):
        # Remember and disable tkinter's implicit default-root support.
        cls._old_support_default_root = tkinter._support_default_root
        destroy_default_root()
        tkinter.NoDefaultRoot()
        cls.root = tkinter.Tk()
        cls.wantobjects = cls.root.wantobjects()
        # De-maximize main window.
        # Some window managers can maximize new windows.
        cls.root.wm_state('normal')
        try:
            cls.root.wm_attributes('-zoomed', False)
        except tkinter.TclError:
            # '-zoomed' is not supported on all platforms/window managers.
            pass

    @classmethod
    def tearDownClass(cls):
        cls.root.destroy()
        cls.root = None
        tkinter._default_root = None
        # Restore whatever default-root behavior was active before.
        tkinter._support_default_root = cls._old_support_default_root

    def setUp(self):
        self.root.deiconify()

    def tearDown(self):
        # Destroy per-test child widgets, then hide the shared root.
        for w in self.root.winfo_children():
            w.destroy()
        self.root.withdraw()
def destroy_default_root():
    """Tear down tkinter's implicit default root window, if one exists."""
    root = getattr(tkinter, '_default_root', None)
    if root:
        # Flush pending idle callbacks before destroying the window.
        root.update_idletasks()
        root.destroy()
        tkinter._default_root = None
def simulate_mouse_click(widget, x, y):
    """Generate proper events to click at the x, y position (tries to act
    like an X server)."""
    # The event order matters: enter the widget, move to the target
    # position, then press and release button 1 there.
    widget.event_generate('<Enter>', x=0, y=0)
    widget.event_generate('<Motion>', x=x, y=y)
    widget.event_generate('<ButtonPress-1>', x=x, y=y)
    widget.event_generate('<ButtonRelease-1>', x=x, y=y)
import _tkinter
tcl_version = tuple(map(int, _tkinter.TCL_VERSION.split('.')))
def requires_tcl(*version):
    """Skip decorator requiring the compiled-in Tcl version >= *version*."""
    wanted = '.'.join(map(str, version))
    return unittest.skipUnless(tcl_version >= version,
                               'requires Tcl version >= ' + wanted)
# Cache for get_tk_patchlevel(); populated on first call.
_tk_patchlevel = None
def get_tk_patchlevel():
    """Return the runtime Tk patchlevel as a tuple of ints (cached).

    Non-numeric components (e.g. 'b1') are recorded as -1 so tuples
    still compare sensibly.
    """
    global _tk_patchlevel
    if _tk_patchlevel is None:
        interp = tkinter.Tcl()
        parts = []
        for piece in interp.call('info', 'patchlevel').split('.'):
            try:
                parts.append(int(piece, 10))
            except ValueError:
                parts.append(-1)
        _tk_patchlevel = tuple(parts)
    return _tk_patchlevel
# Points-per-unit conversion factors (72 points == 1 inch).
units = {
    'c': 72 / 2.54,     # centimeters
    'i': 72,            # inches
    'm': 72 / 25.4,     # millimeters
    'p': 1,             # points
}

def pixels_conv(value):
    """Convert a Tk dimension string such as '2.5c' into pixels (float)."""
    magnitude, unit = value[:-1], value[-1:]
    return float(magnitude) * units[unit]
def tcl_obj_eq(actual, expected):
    """Compare values, treating a Tcl_Obj as its string form and tuples
    element-wise (recursively)."""
    if actual == expected:
        return True
    if isinstance(actual, _tkinter.Tcl_Obj) and isinstance(expected, str):
        return str(actual) == expected
    if isinstance(actual, tuple) and isinstance(expected, tuple):
        if len(actual) != len(expected):
            return False
        return all(tcl_obj_eq(act, exp)
                   for act, exp in zip(actual, expected))
    return False
def widget_eq(actual, expected):
    """Compare values, coercing widgets to their string (path) form."""
    if actual == expected:
        return True
    comparable = (isinstance(actual, (str, tkinter.Widget)) and
                  isinstance(expected, (str, tkinter.Widget)))
    return comparable and str(actual) == str(expected)
| lgpl-3.0 |
emedinaa/contentbox | third_party/social/storage/mongoengine_orm.py | 83 | 5914 | import base64
import six
from mongoengine import DictField, IntField, StringField, \
EmailField, BooleanField
from mongoengine.queryset import OperationError
from social.storage.base import UserMixin, AssociationMixin, NonceMixin, \
CodeMixin, BaseStorage
UNUSABLE_PASSWORD = '!' # Borrowed from django 1.4
class MongoengineUserMixin(UserMixin):
    """Social Auth association model"""
    user = None
    provider = StringField(max_length=32)
    uid = StringField(max_length=255, unique_with='provider')
    extra_data = DictField()

    def str_id(self):
        """Return this document's id as a string."""
        return str(self.id)

    @classmethod
    def get_social_auth_for_user(cls, user, provider=None, id=None):
        """Return this user's associations, optionally filtered by
        provider and/or association id."""
        qs = cls.objects
        if provider:
            qs = qs.filter(provider=provider)
        if id:
            qs = qs.filter(id=id)
        return qs.filter(user=user.id)

    @classmethod
    def create_social_auth(cls, user, uid, provider):
        """Create the association row linking ``user`` to a provider uid."""
        # BUG FIX: the original tested isinstance(type(uid), string_types),
        # which is never true (type(uid) is a class, not a string), so every
        # uid -- including str -- was run through str().  Test the value
        # itself, mirroring get_social_auth() below.
        if not isinstance(uid, six.string_types):
            uid = str(uid)
        return cls.objects.create(user=user.id, uid=uid, provider=provider)

    @classmethod
    def username_max_length(cls):
        """Return the max_length of the user model's username field."""
        username_field = cls.username_field()
        field = getattr(cls.user_model(), username_field)
        return field.max_length

    @classmethod
    def username_field(cls):
        """Return the user model's username field name ('username' default)."""
        return getattr(cls.user_model(), 'USERNAME_FIELD', 'username')

    @classmethod
    def create_user(cls, *args, **kwargs):
        """Create a user with an unusable password."""
        kwargs['password'] = UNUSABLE_PASSWORD
        if 'email' in kwargs:
            # Empty string makes email regex validation fail
            kwargs['email'] = kwargs['email'] or None
        return cls.user_model().objects.create(*args, **kwargs)

    @classmethod
    def allowed_to_disconnect(cls, user, backend_name, association_id=None):
        """True if disconnecting would still leave the user a login method
        (a usable password or at least one other association)."""
        if association_id is not None:
            qs = cls.objects.filter(id__ne=association_id)
        else:
            qs = cls.objects.filter(provider__ne=backend_name)
        qs = qs.filter(user=user)

        if hasattr(user, 'has_usable_password'):
            valid_password = user.has_usable_password()
        else:
            valid_password = True
        return valid_password or qs.count() > 0

    @classmethod
    def changed(cls, user):
        """Persist changes made to the user instance."""
        user.save()

    def set_extra_data(self, extra_data=None):
        """Merge provider extra data; save only if something changed."""
        if super(MongoengineUserMixin, self).set_extra_data(extra_data):
            self.save()

    @classmethod
    def disconnect(cls, entry):
        """Remove a social association document."""
        entry.delete()

    @classmethod
    def user_exists(cls, *args, **kwargs):
        """
        Return True/False if a User instance exists with the given arguments.
        Arguments are directly passed to filter() manager method.
        """
        if 'username' in kwargs:
            kwargs[cls.username_field()] = kwargs.pop('username')
        return cls.user_model().objects.filter(*args, **kwargs).count() > 0

    @classmethod
    def get_username(cls, user):
        """Return the user's username value (or None)."""
        return getattr(user, cls.username_field(), None)

    @classmethod
    def get_user(cls, pk):
        """Return the user with the given primary key, or None."""
        try:
            return cls.user_model().objects.get(id=pk)
        except cls.user_model().DoesNotExist:
            return None

    @classmethod
    def get_users_by_email(cls, email):
        """Return users whose email matches case-insensitively."""
        return cls.user_model().objects.filter(email__iexact=email)

    @classmethod
    def get_social_auth(cls, provider, uid):
        """Return the association for (provider, uid), or None."""
        if not isinstance(uid, six.string_types):
            uid = str(uid)
        try:
            return cls.objects.get(provider=provider, uid=uid)
        except cls.DoesNotExist:
            return None
class MongoengineNonceMixin(NonceMixin):
    """One use numbers"""
    server_url = StringField(max_length=255)
    timestamp = IntField()
    salt = StringField(max_length=40)

    @classmethod
    def use(cls, server_url, timestamp, salt):
        """Record the nonce; True only the first time this triple is seen."""
        _, created = cls.objects.get_or_create(server_url=server_url,
                                               timestamp=timestamp,
                                               salt=salt)
        return created
class MongoengineAssociationMixin(AssociationMixin):
    """OpenId account association"""
    server_url = StringField(max_length=255)
    handle = StringField(max_length=255)
    secret = StringField(max_length=255)  # Stored base64 encoded
    issued = IntField()
    lifetime = IntField()
    assoc_type = StringField(max_length=64)

    @classmethod
    def store(cls, server_url, association):
        """Create or update the stored association for (server_url, handle)."""
        # Don't use get_or_create because issued cannot be null
        try:
            assoc = cls.objects.get(server_url=server_url,
                                    handle=association.handle)
        except cls.DoesNotExist:
            assoc = cls(server_url=server_url,
                        handle=association.handle)
        # NOTE(review): base64.encodestring is deprecated and removed in
        # Python 3.9 (use encodebytes there); kept here for the Python 2
        # compatibility this codebase targets -- confirm before porting.
        assoc.secret = base64.encodestring(association.secret)
        assoc.issued = association.issued
        assoc.lifetime = association.lifetime
        assoc.assoc_type = association.assoc_type
        assoc.save()

    @classmethod
    def get(cls, *args, **kwargs):
        """Return associations matching the given filter arguments."""
        return cls.objects.filter(*args, **kwargs)

    @classmethod
    def remove(cls, ids_to_delete):
        """Delete all associations whose pk is in ids_to_delete."""
        cls.objects.filter(pk__in=ids_to_delete).delete()
class MongoengineCodeMixin(CodeMixin):
    # Email-verification code storage.
    email = EmailField()
    code = StringField(max_length=32)
    verified = BooleanField(default=False)

    @classmethod
    def get_code(cls, code):
        """Return the stored code document, or None when unknown."""
        try:
            return cls.objects.get(code=code)
        except cls.DoesNotExist:
            return None
class BaseMongoengineStorage(BaseStorage):
    # Wire the concrete mongoengine mixins into the generic storage API.
    user = MongoengineUserMixin
    nonce = MongoengineNonceMixin
    association = MongoengineAssociationMixin
    code = MongoengineCodeMixin

    @classmethod
    def is_integrity_error(cls, exception):
        """True for mongo duplicate-key errors (code E11000).

        NOTE(review): ``exception.message`` exists only on Python 2;
        under Python 3 this would raise AttributeError -- confirm the
        supported runtime before relying on it.
        """
        return exception.__class__ is OperationError and \
               'E11000' in exception.message
| apache-2.0 |
mushyshah/ELEC490G11 | node_modules/node-gyp/gyp/pylib/gyp/generator/make.py | 388 | 91069 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Notes:
#
# This is all roughly based on the Makefile system used by the Linux
# kernel, but is a non-recursive make -- we put the entire dependency
# graph in front of make and let it figure it out.
#
# The code below generates a separate .mk file for each target, but
# all are sourced by the top-level Makefile. This means that all
# variables in .mk-files clobber one another. Be careful to use :=
# where appropriate for immediate evaluation, and similarly to watch
# that you're not relying on a variable value to last beween different
# .mk files.
#
# TODOs:
#
# Global settings and utility functions are currently stuffed in the
# toplevel Makefile. It may make sense to generate some .mk files on
# the side to keep the the files readable.
import os
import re
import sys
import subprocess
import gyp
import gyp.common
import gyp.xcode_emulation
from gyp.common import GetEnvironFallback
from gyp.common import GypError
# Values substituted for gyp's generator variables; the $(...) forms are
# expanded later by make itself.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'INTERMEDIATE_DIR': '$(obj).$(TOOLSET)/$(TARGET)/geni',
  'SHARED_INTERMEDIATE_DIR': '$(obj)/gen',
  'PRODUCT_DIR': '$(builddir)',
  'RULE_INPUT_ROOT': '%(INPUT_ROOT)s',  # This gets expanded by Python.
  'RULE_INPUT_DIRNAME': '%(INPUT_DIRNAME)s',  # This gets expanded by Python.
  'RULE_INPUT_PATH': '$(abspath $<)',
  'RULE_INPUT_EXT': '$(suffix $<)',
  'RULE_INPUT_NAME': '$(notdir $<)',
  'CONFIGURATION_NAME': '$(BUILDTYPE)',
}

# Make supports multiple toolsets
generator_supports_multiple_toolsets = True

# Request sorted dependencies in the order from dependents to dependencies.
generator_wants_sorted_dependencies = False

# Placates pylint.
# These three may be overwritten by CalculateVariables() on mac.
generator_additional_non_configuration_keys = []
generator_additional_path_sections = []
generator_extra_sources_for_rules = []
generator_filelist_paths = None
def CalculateVariables(default_variables, params):
  """Calculate additional variables for use in the build (called by gyp)."""
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'mac':
    product_dir = generator_default_variables['PRODUCT_DIR']
    default_variables.setdefault('OS', 'mac')
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.dylib')
    default_variables.setdefault('SHARED_LIB_DIR', product_dir)
    default_variables.setdefault('LIB_DIR', product_dir)
    # Copy additional generator configuration data from Xcode, which is shared
    # by the Mac Make generator.
    import gyp.generator.xcode as xcode_generator
    global generator_additional_non_configuration_keys
    global generator_additional_path_sections
    global generator_extra_sources_for_rules
    generator_additional_non_configuration_keys = getattr(
        xcode_generator, 'generator_additional_non_configuration_keys', [])
    generator_additional_path_sections = getattr(
        xcode_generator, 'generator_additional_path_sections', [])
    generator_extra_sources_for_rules = getattr(
        xcode_generator, 'generator_extra_sources_for_rules', [])
    # Objective-C(++) sources only compile on mac.
    COMPILABLE_EXTENSIONS.update({'.m': 'objc', '.mm' : 'objcxx'})
  else:
    # Android has historically been generated as a flavor of linux; keep that
    # legacy behavior for now.
    operating_system = 'linux' if flavor == 'android' else flavor
    default_variables.setdefault('OS', operating_system)
    default_variables.setdefault('SHARED_LIB_SUFFIX', '.so')
    default_variables.setdefault('SHARED_LIB_DIR','$(builddir)/lib.$(TOOLSET)')
    default_variables.setdefault('LIB_DIR', '$(obj).$(TOOLSET)')
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  flags = params.get('generator_flags', {})
  if flags.get('android_ndk_version', None):
    # Android NDK requires a strict link order.
    global generator_wants_sorted_dependencies
    generator_wants_sorted_dependencies = True
  options = params['options']
  toplevel = options.toplevel_dir
  # Fall back to the toplevel directory when no explicit output dir was given.
  output_dir = options.generator_output or toplevel
  builddir_name = flags.get('output_dir', 'out')
  global generator_filelist_paths
  generator_filelist_paths = {
    'toplevel': toplevel,
    'qualified_out_dir':
        os.path.normpath(os.path.join(output_dir, builddir_name, 'gypfiles')),
  }
# The .d checking code below uses these functions:
# wildcard, sort, foreach, shell, wordlist
# wildcard can handle spaces, the rest can't.
# Since I could find no way to make foreach work with spaces in filenames
# correctly, the .d files have spaces replaced with another character. The .d
# file for
#   Chromium\ Framework.framework/foo
# is for example
#   out/Release/.deps/out/Release/Chromium?Framework.framework/foo
# This is the replacement character.
# (Swapped in and out by the replace_spaces/unreplace_spaces helpers defined
# in SHARED_HEADER below.)
SPACE_REPLACEMENT = '?'
# Make fragments defining the archive (alink) and link commands for each
# supported flavor.  One of these is interpolated into SHARED_HEADER through
# its %(link_commands)s placeholder.
LINK_COMMANDS_LINUX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
# We support two kinds of shared objects (.so):
# 1) shared_library, which is just bundling together many dependent libraries
# into a link line.
# 2) loadable_module, which is generating a module intended for dlopen().
#
# They differ only slightly:
# In the former case, we want to package all dependent code into the .so.
# In the latter case, we want to package just the API exposed by the
# outermost module.
# This means shared_library uses --whole-archive, while loadable_module doesn't.
# (Note that --whole-archive is incompatible with the --start-group used in
# normal linking.)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
"""
# mac uses libtool for static archives and has no --whole-archive/-soname
# equivalents on the link lines.
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -bundle $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Android adds separate *_host variants for the host toolset.
LINK_COMMANDS_ANDROID = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) crsT $@ $(filter %.o,$^)
# Due to circular dependencies between libraries :(, we wrap the
# special "figure out circular dependencies" flags around the entire
# input list during linking.
quiet_cmd_link = LINK($(TOOLSET)) $@
quiet_cmd_link_host = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ -Wl,--start-group $(LD_INPUTS) -Wl,--end-group $(LIBS)
cmd_link_host = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
# Other shared-object link notes:
# - Set SONAME to the library filename so our binaries don't reference
# the local, absolute paths used on the link command-line.
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--whole-archive $(LD_INPUTS) -Wl,--no-whole-archive $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ -Wl,--start-group $(filter-out FORCE_DO_CMD, $^) -Wl,--end-group $(LIBS)
quiet_cmd_solink_module_host = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module_host = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -Wl,-soname=$(@F) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# AIX's ar needs -X32_64 to accept both 32- and 64-bit members.
LINK_COMMANDS_AIX = """\
quiet_cmd_alink = AR($(TOOLSET)) $@
cmd_alink = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_alink_thin = AR($(TOOLSET)) $@
cmd_alink_thin = rm -f $@ && $(AR.$(TOOLSET)) -X32_64 crs $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink = SOLINK($(TOOLSET)) $@
cmd_solink = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(LD_INPUTS) $(LIBS)
quiet_cmd_solink_module = SOLINK_MODULE($(TOOLSET)) $@
cmd_solink_module = $(LINK.$(TOOLSET)) -shared $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o $@ $(filter-out FORCE_DO_CMD, $^) $(LIBS)
"""
# Header of toplevel Makefile.
# This should go into the build tree, but it's easier to keep it here for now.
SHARED_HEADER = ("""\
# We borrow heavily from the kernel build setup, though we are simpler since
# we don't have Kconfig tweaking settings on us.
# The implicit make rules have it looking for RCS files, among other things.
# We instead explicitly write all the rules we care about.
# It's even quicker (saves ~200ms) to pass -r on the command line.
MAKEFLAGS=-r
# The source directory tree.
srcdir := %(srcdir)s
abs_srcdir := $(abspath $(srcdir))
# The name of the builddir.
builddir_name ?= %(builddir)s
# The V=1 flag on command line makes us verbosely print command lines.
ifdef V
quiet=
else
quiet=quiet_
endif
# Specify BUILDTYPE=Release on the command line for a release build.
BUILDTYPE ?= %(default_configuration)s
# Directory all our build output goes into.
# Note that this must be two directories beneath src/ for unit tests to pass,
# as they reach into the src/ directory for data with relative paths.
builddir ?= $(builddir_name)/$(BUILDTYPE)
abs_builddir := $(abspath $(builddir))
depsdir := $(builddir)/.deps
# Object output directory.
obj := $(builddir)/obj
abs_obj := $(abspath $(obj))
# We build up a list of every single one of the targets so we can slurp in the
# generated dependency rule Makefiles in one pass.
all_deps :=
%(make_global_settings)s
CC.target ?= %(CC.target)s
CFLAGS.target ?= $(CPPFLAGS) $(CFLAGS)
CXX.target ?= %(CXX.target)s
CXXFLAGS.target ?= $(CPPFLAGS) $(CXXFLAGS)
LINK.target ?= %(LINK.target)s
LDFLAGS.target ?= $(LDFLAGS)
AR.target ?= $(AR)
# C++ apps need to be linked with g++.
LINK ?= $(CXX.target)
# TODO(evan): move all cross-compilation logic to gyp-time so we don't need
# to replicate this environment fallback in make as well.
CC.host ?= %(CC.host)s
CFLAGS.host ?= $(CPPFLAGS_host) $(CFLAGS_host)
CXX.host ?= %(CXX.host)s
CXXFLAGS.host ?= $(CPPFLAGS_host) $(CXXFLAGS_host)
LINK.host ?= %(LINK.host)s
LDFLAGS.host ?=
AR.host ?= %(AR.host)s
# Define a dir function that can handle spaces.
# http://www.gnu.org/software/make/manual/make.html#Syntax-of-Functions
# "leading spaces cannot appear in the text of the first argument as written.
# These characters can be put into the argument value by variable substitution."
empty :=
space := $(empty) $(empty)
# http://stackoverflow.com/questions/1189781/using-make-dir-or-notdir-on-a-path-with-spaces
replace_spaces = $(subst $(space),""" + SPACE_REPLACEMENT + """,$1)
unreplace_spaces = $(subst """ + SPACE_REPLACEMENT + """,$(space),$1)
dirx = $(call unreplace_spaces,$(dir $(call replace_spaces,$1)))
# Flags to make gcc output dependency info. Note that you need to be
# careful here to use the flags that ccache and distcc can understand.
# We write to a dep file on the side first and then rename at the end
# so we can't end up with a broken dep file.
depfile = $(depsdir)/$(call replace_spaces,$@).d
DEPFLAGS = -MMD -MF $(depfile).raw
# We have to fixup the deps output in a few ways.
# (1) the file output should mention the proper .o file.
# ccache or distcc lose the path to the target, so we convert a rule of
# the form:
# foobar.o: DEP1 DEP2
# into
# path/to/foobar.o: DEP1 DEP2
# (2) we want missing files not to cause us to fail to build.
# We want to rewrite
# foobar.o: DEP1 DEP2 \\
# DEP3
# to
# DEP1:
# DEP2:
# DEP3:
# so if the files are missing, they're just considered phony rules.
# We have to do some pretty insane escaping to get those backslashes
# and dollar signs past make, the shell, and sed at the same time.
# Doesn't work with spaces, but that's fine: .d files have spaces in
# their names replaced with other characters."""
r"""
define fixup_dep
# The depfile may not exist if the input file didn't have any #includes.
touch $(depfile).raw
# Fixup path as in (1).
sed -e "s|^$(notdir $@)|$@|" $(depfile).raw >> $(depfile)
# Add extra rules as in (2).
# We remove slashes and replace spaces with new lines;
# remove blank lines;
# delete the first line and append a colon to the remaining lines.
sed -e 's|\\||' -e 'y| |\n|' $(depfile).raw |\
grep -v '^$$' |\
sed -e 1d -e 's|$$|:|' \
>> $(depfile)
rm $(depfile).raw
endef
"""
"""
# Command definitions:
# - cmd_foo is the actual command to run;
# - quiet_cmd_foo is the brief-output summary of the command.
quiet_cmd_cc = CC($(TOOLSET)) $@
cmd_cc = $(CC.$(TOOLSET)) $(GYP_CFLAGS) $(DEPFLAGS) $(CFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_cxx = CXX($(TOOLSET)) $@
cmd_cxx = $(CXX.$(TOOLSET)) $(GYP_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
%(extra_commands)s
quiet_cmd_touch = TOUCH $@
cmd_touch = touch $@
quiet_cmd_copy = COPY $@
# send stderr to /dev/null to ignore messages when linking directories.
cmd_copy = rm -rf "$@" && cp %(copy_archive_args)s "$<" "$@"
%(link_commands)s
"""
r"""
# Define an escape_quotes function to escape single quotes.
# This allows us to handle quotes properly as long as we always use
# use single quotes and escape_quotes.
escape_quotes = $(subst ','\'',$(1))
# This comment is here just to include a ' to unconfuse syntax highlighting.
# Define an escape_vars function to escape '$' variable syntax.
# This allows us to read/write command lines with shell variables (e.g.
# $LD_LIBRARY_PATH), without triggering make substitution.
escape_vars = $(subst $$,$$$$,$(1))
# Helper that expands to a shell command to echo a string exactly as it is in
# make. This uses printf instead of echo because printf's behaviour with respect
# to escape sequences is more portable than echo's across different shells
# (e.g., dash, bash).
exact_echo = printf '%%s\n' '$(call escape_quotes,$(1))'
"""
"""
# Helper to compare the command we're about to run against the command
# we logged the last time we ran the command. Produces an empty
# string (false) when the commands match.
# Tricky point: Make has no string-equality test function.
# The kernel uses the following, but it seems like it would have false
# positives, where one string reordered its arguments.
# arg_check = $(strip $(filter-out $(cmd_$(1)), $(cmd_$@)) \\
# $(filter-out $(cmd_$@), $(cmd_$(1))))
# We instead substitute each for the empty string into the other, and
# say they're equal if both substitutions produce the empty string.
# .d files contain """ + SPACE_REPLACEMENT + \
""" instead of spaces, take that into account.
command_changed = $(or $(subst $(cmd_$(1)),,$(cmd_$(call replace_spaces,$@))),\\
$(subst $(cmd_$(call replace_spaces,$@)),,$(cmd_$(1))))
# Helper that is non-empty when a prerequisite changes.
# Normally make does this implicitly, but we force rules to always run
# so we can check their command lines.
# $? -- new prerequisites
# $| -- order-only dependencies
prereq_changed = $(filter-out FORCE_DO_CMD,$(filter-out $|,$?))
# Helper that executes all postbuilds until one fails.
define do_postbuilds
@E=0;\\
for p in $(POSTBUILDS); do\\
eval $$p;\\
E=$$?;\\
if [ $$E -ne 0 ]; then\\
break;\\
fi;\\
done;\\
if [ $$E -ne 0 ]; then\\
rm -rf "$@";\\
exit $$E;\\
fi
endef
# do_cmd: run a command via the above cmd_foo names, if necessary.
# Should always run for a given target to handle command-line changes.
# Second argument, if non-zero, makes it do asm/C/C++ dependency munging.
# Third argument, if non-zero, makes it do POSTBUILDS processing.
# Note: We intentionally do NOT call dirx for depfile, since it contains """ + \
SPACE_REPLACEMENT + """ for
# spaces already and dirx strips the """ + SPACE_REPLACEMENT + \
""" characters.
define do_cmd
$(if $(or $(command_changed),$(prereq_changed)),
@$(call exact_echo, $($(quiet)cmd_$(1)))
@mkdir -p "$(call dirx,$@)" "$(dir $(depfile))"
$(if $(findstring flock,$(word %(flock_index)d,$(cmd_$1))),
@$(cmd_$(1))
@echo " $(quiet_cmd_$(1)): Finished",
@$(cmd_$(1))
)
@$(call exact_echo,$(call escape_vars,cmd_$(call replace_spaces,$@) := $(cmd_$(1)))) > $(depfile)
@$(if $(2),$(fixup_dep))
$(if $(and $(3), $(POSTBUILDS)),
$(call do_postbuilds)
)
)
endef
# Declare the "%(default_target)s" target first so it is the default,
# even though we don't have the deps yet.
.PHONY: %(default_target)s
%(default_target)s:
# make looks for ways to re-generate included makefiles, but in our case, we
# don't have a direct way. Explicitly telling make that it has nothing to do
# for them makes it go faster.
%%.d: ;
# Use FORCE_DO_CMD to force a target to run. Should be coupled with
# do_cmd.
.PHONY: FORCE_DO_CMD
FORCE_DO_CMD:
""")
# Additional command definitions (Objective-C(++), precompiled headers, and
# gyp-mac-tool invocations) — presumably interpolated into SHARED_HEADER via
# %(extra_commands)s when generating for mac; verify against GenerateOutput.
SHARED_HEADER_MAC_COMMANDS = """
quiet_cmd_objc = CXX($(TOOLSET)) $@
cmd_objc = $(CC.$(TOOLSET)) $(GYP_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_objcxx = CXX($(TOOLSET)) $@
cmd_objcxx = $(CXX.$(TOOLSET)) $(GYP_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# Commands for precompiled header files.
quiet_cmd_pch_c = CXX($(TOOLSET)) $@
cmd_pch_c = $(CC.$(TOOLSET)) $(GYP_PCH_CFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_cc = CXX($(TOOLSET)) $@
cmd_pch_cc = $(CC.$(TOOLSET)) $(GYP_PCH_CXXFLAGS) $(DEPFLAGS) $(CXXFLAGS.$(TOOLSET)) -c -o $@ $<
quiet_cmd_pch_m = CXX($(TOOLSET)) $@
cmd_pch_m = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCFLAGS) $(DEPFLAGS) -c -o $@ $<
quiet_cmd_pch_mm = CXX($(TOOLSET)) $@
cmd_pch_mm = $(CC.$(TOOLSET)) $(GYP_PCH_OBJCXXFLAGS) $(DEPFLAGS) -c -o $@ $<
# gyp-mac-tool is written next to the root Makefile by gyp.
# Use $(4) for the command, since $(2) and $(3) are used as flag by do_cmd
# already.
quiet_cmd_mac_tool = MACTOOL $(4) $<
cmd_mac_tool = ./gyp-mac-tool $(4) $< "$@"
quiet_cmd_mac_package_framework = PACKAGE FRAMEWORK $@
cmd_mac_package_framework = ./gyp-mac-tool package-framework "$@" $(4)
quiet_cmd_infoplist = INFOPLIST $@
cmd_infoplist = $(CC.$(TOOLSET)) -E -P -Wno-trigraphs -x c $(INFOPLIST_DEFINES) "$<" -o "$@"
"""
def WriteRootHeaderSuffixRules(writer):
  """Emit the top-level suffix rules that compile each compilable extension
  into $(obj), for sources found in the source tree and in both generated
  source locations."""
  extensions = sorted(COMPILABLE_EXTENSIONS.keys(), key=str.lower)
  def write_rules(prereq_dir):
    # One pattern rule plus its do_cmd invocation per compilable extension.
    for ext in extensions:
      writer.write('$(obj).$(TOOLSET)/%%.o: %s/%%%s FORCE_DO_CMD\n'
                   % (prereq_dir, ext))
      writer.write('\t@$(call do_cmd,%s,1)\n' % COMPILABLE_EXTENSIONS[ext])
  writer.write('# Suffix rules, putting all outputs into $(obj).\n')
  write_rules('$(srcdir)')
  writer.write('\n# Try building from generated source, too.\n')
  write_rules('$(obj).$(TOOLSET)')
  writer.write('\n')
  write_rules('$(obj)')
  writer.write('\n')
SHARED_HEADER_SUFFIX_RULES_COMMENT1 = ("""\
# Suffix rules, putting all outputs into $(obj).
""")
SHARED_HEADER_SUFFIX_RULES_COMMENT2 = ("""\
# Try building from generated source, too.
""")
SHARED_FOOTER = """\
# "all" is a concatenation of the "all" targets from all the included
# sub-makefiles. This is just here to clarify.
all:
# Add in dependency-tracking rules. $(all_deps) is the list of every single
# target in our tree. Only consider the ones with .d (dependency) info:
d_files := $(wildcard $(foreach f,$(all_deps),$(depsdir)/$(f).d))
ifneq ($(d_files),)
include $(d_files)
endif
"""
header = """\
# This file is generated by gyp; do not edit.
"""
# Maps every compilable file extension to the do_cmd that compiles it.
# Note: CalculateVariables() adds '.m' (objc) and '.mm' (objcxx) to this map
# when generating for mac.
COMPILABLE_EXTENSIONS = {
  '.c': 'cc',
  '.cc': 'cxx',
  '.cpp': 'cxx',
  '.cxx': 'cxx',
  '.s': 'cc',
  '.S': 'cc',
}
def Compilable(filename):
  """Return true if the file is compilable (should be in OBJS)."""
  # The original hand-rolled loop over a generator re-implemented any();
  # any() short-circuits on the first matching extension, same as before.
  return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
  """Return true if the file is linkable (should be on the link line)."""
  object_suffix = '.o'
  return filename.endswith(object_suffix)
def Target(filename):
  """Translate a compilable filename to its .o target."""
  root, _ = os.path.splitext(filename)
  return root + '.o'
def EscapeShellArgument(s):
  """Quotes an argument so that it will be interpreted literally by a POSIX
  shell. Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  # Close the quote, emit an escaped quote, reopen: ' -> '\''
  return "'%s'" % s.replace("'", "'\\''")
def EscapeMakeVariableExpansion(s):
  """Make has its own variable expansion syntax using $. We must escape it for
  string to be interpreted literally."""
  # Doubling every '$' is how make spells a literal dollar sign.
  return '$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered."""
  # Shell-quote first, then protect '$' from make expansion.
  escaped = EscapeMakeVariableExpansion(EscapeShellArgument(s))
  # '#' characters must be escaped even embedded in a string, else Make will
  # treat it as the start of a comment.
  return escaped.replace('#', r'\#')
def QuoteIfNecessary(string):
  """TODO: Should this ideally be replaced with one or more of the above
  functions?"""
  # Strings without embedded double quotes pass through untouched.
  if '"' not in string:
    return string
  return '"%s"' % string.replace('"', '\\"')
def StringToMakefileVariable(string):
  """Convert a string to a value that is acceptable as a make variable name."""
  # Anything outside [A-Za-z0-9_] is collapsed to an underscore.
  sanitized = re.sub('[^a-zA-Z0-9_]', '_', string)
  return sanitized
# Module-level prefix prepended to relative paths by Sourceify (empty by
# default; module state that may be reassigned at generation time).
srcdir_prefix = ''
def Sourceify(path):
  """Convert a path to its source directory form."""
  # Paths that embed make variable expansions, or that are already absolute,
  # are returned untouched.
  if '$(' in path or os.path.isabs(path):
    return path
  return srcdir_prefix + path
def QuoteSpaces(s, quote=r'\ '):
  """Replace every space in s with quote (a backslash-escaped space by
  default)."""
  return quote.join(s.split(' '))
# TODO: Avoid code duplication with _ValidateSourcesForMSVSProject in msvs.py.
def _ValidateSourcesForOSX(spec, all_sources):
"""Makes sure if duplicate basenames are not specified in the source list.
Arguments:
spec: The target dictionary containing the properties of the target.
"""
if spec.get('type', None) != 'static_library':
return
basenames = {}
for source in all_sources:
name, ext = os.path.splitext(source)
is_compiled_file = ext in [
'.c', '.cc', '.cpp', '.cxx', '.m', '.mm', '.s', '.S']
if not is_compiled_file:
continue
basename = os.path.basename(name) # Don't include extension.
basenames.setdefault(basename, []).append(source)
error = ''
for basename, files in basenames.iteritems():
if len(files) > 1:
error += ' %s: %s\n' % (basename, ' '.join(files))
if error:
print('static library %s has several files with the same basename:\n' %
spec['target_name'] + error + 'libtool on OS X will generate' +
' warnings for them.')
raise GypError('Duplicate basenames in sources section, see list above')
# Map from qualified target to path to output.
# Populated by MakefileWriter.Write() as each target's .mk is generated.
target_outputs = {}
# Map from qualified target to any linkable output.  A subset
# of target_outputs.  E.g. when mybinary depends on liba, we want to
# include liba in the linker line; when otherbinary depends on
# mybinary, we just want to build mybinary first.
# Also populated by MakefileWriter.Write() (static/shared libraries only).
target_link_deps = {}
class MakefileWriter(object):
"""MakefileWriter packages up the writing of one target-specific foobar.mk.
Its only real entry point is Write(), and is mostly used for namespacing.
"""
  def __init__(self, generator_flags, flavor):
    """Remember generator configuration and precompute per-target suffix-rule
    templates for every compilable extension.

    Arguments:
      generator_flags: dict of generator options (e.g. android_ndk_version)
      flavor: flavor string as returned by gyp.common.GetFlavor
    """
    self.generator_flags = generator_flags
    self.flavor = flavor
    # ext -> make suffix-rule text, one dict per prerequisite location:
    # the source tree, generated files in $(obj).$(TOOLSET), and generated
    # files in $(obj).
    self.suffix_rules_srcdir = {}
    self.suffix_rules_objdir1 = {}
    self.suffix_rules_objdir2 = {}
    # Generate suffix rules for all compilable extensions.
    for ext in COMPILABLE_EXTENSIONS.keys():
      # Suffix rules for source folder.
      self.suffix_rules_srcdir.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(srcdir)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      # Suffix rules for generated source files.
      self.suffix_rules_objdir1.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj).$(TOOLSET)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
      self.suffix_rules_objdir2.update({ext: ("""\
$(obj).$(TOOLSET)/$(TARGET)/%%.o: $(obj)/%%%s FORCE_DO_CMD
	@$(call do_cmd,%s,1)
""" % (ext, COMPILABLE_EXTENSIONS[ext]))})
  def Write(self, qualified_target, base_path, output_filename, spec, configs,
            part_of_all):
    """The main entry point: writes a .mk file for a single target.

    Arguments:
      qualified_target: target we're generating
      base_path: path relative to source root we're building in, used to resolve
                 target-relative paths
      output_filename: output .mk file name to write
      spec, configs: gyp info
      part_of_all: flag indicating this target is part of 'all'
    """
    gyp.common.EnsureDirExists(output_filename)
    self.fp = open(output_filename, 'w')
    self.fp.write(header)
    # Target bookkeeping pulled straight from the spec.
    self.qualified_target = qualified_target
    self.path = base_path
    self.target = spec['target_name']
    self.type = spec['type']
    self.toolset = spec['toolset']
    self.is_mac_bundle = gyp.xcode_emulation.IsMacBundle(self.flavor, spec)
    # xcode_settings stays None on non-mac so later code can branch on it.
    if self.flavor == 'mac':
      self.xcode_settings = gyp.xcode_emulation.XcodeSettings(spec)
    else:
      self.xcode_settings = None
    deps, link_deps = self.ComputeDeps(spec)
    # Some of the generation below can add extra output, sources, or
    # link dependencies.  All of the out params of the functions that
    # follow use names like extra_foo.
    extra_outputs = []
    extra_sources = []
    extra_link_deps = []
    extra_mac_bundle_resources = []
    mac_bundle_deps = []
    # For bundles the on-disk bundle and its binary are distinct outputs.
    if self.is_mac_bundle:
      self.output = self.ComputeMacBundleOutput(spec)
      self.output_binary = self.ComputeMacBundleBinaryOutput(spec)
    else:
      self.output = self.output_binary = self.ComputeOutput(spec)
    self.is_standalone_static_library = bool(
        spec.get('standalone_static_library', 0))
    self._INSTALLABLE_TARGETS = ('executable', 'loadable_module',
                                 'shared_library')
    # Installable targets get a short alias (their basename) and an install
    # location; everything else is addressed by its full output path.
    if (self.is_standalone_static_library or
        self.type in self._INSTALLABLE_TARGETS):
      self.alias = os.path.basename(self.output)
      install_path = self._InstallableTargetInstallPath()
    else:
      self.alias = self.output
      install_path = self.output
    # TOOLSET/TARGET are referenced by the suffix-rule templates emitted below.
    self.WriteLn("TOOLSET := " + self.toolset)
    self.WriteLn("TARGET := " + self.target)
    # Actions must come first, since they can generate more OBJs for use below.
    if 'actions' in spec:
      self.WriteActions(spec['actions'], extra_sources, extra_outputs,
                        extra_mac_bundle_resources, part_of_all)
    # Rules must be early like actions.
    if 'rules' in spec:
      self.WriteRules(spec['rules'], extra_sources, extra_outputs,
                      extra_mac_bundle_resources, part_of_all)
    if 'copies' in spec:
      self.WriteCopies(spec['copies'], extra_outputs, part_of_all)
    # Bundle resources.
    if self.is_mac_bundle:
      all_mac_bundle_resources = (
          spec.get('mac_bundle_resources', []) + extra_mac_bundle_resources)
      self.WriteMacBundleResources(all_mac_bundle_resources, mac_bundle_deps)
      self.WriteMacInfoPlist(mac_bundle_deps)
    # Sources.
    all_sources = spec.get('sources', []) + extra_sources
    if all_sources:
      if self.flavor == 'mac':
        # libtool on OS X generates warnings for duplicate basenames in the same
        # target.
        _ValidateSourcesForOSX(spec, all_sources)
      self.WriteSources(
          configs, deps, all_sources, extra_outputs,
          extra_link_deps, part_of_all,
          gyp.xcode_emulation.MacPrefixHeader(
              self.xcode_settings, lambda p: Sourceify(self.Absolutify(p)),
              self.Pchify))
      # NOTE: this file targets Python 2, where filter() yields a list; the
      # truthiness test below relies on that.
      sources = filter(Compilable, all_sources)
      if sources:
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT1)
        extensions = set([os.path.splitext(s)[1] for s in sources])
        for ext in extensions:
          if ext in self.suffix_rules_srcdir:
            self.WriteLn(self.suffix_rules_srcdir[ext])
        self.WriteLn(SHARED_HEADER_SUFFIX_RULES_COMMENT2)
        for ext in extensions:
          if ext in self.suffix_rules_objdir1:
            self.WriteLn(self.suffix_rules_objdir1[ext])
        for ext in extensions:
          if ext in self.suffix_rules_objdir2:
            self.WriteLn(self.suffix_rules_objdir2[ext])
        self.WriteLn('# End of this set of suffix rules')
    # Add dependency from bundle to bundle binary.
    if self.is_mac_bundle:
      mac_bundle_deps.append(self.output_binary)
    self.WriteTarget(spec, configs, deps, extra_link_deps + link_deps,
                     mac_bundle_deps, extra_outputs, part_of_all)
    # Update global list of target outputs, used in dependency tracking.
    target_outputs[qualified_target] = install_path
    # Update global list of link dependencies.
    if self.type in ('static_library', 'shared_library'):
      target_link_deps[qualified_target] = self.output_binary
    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if self.generator_flags.get('android_ndk_version', None):
      self.WriteAndroidNdkModuleRule(self.target, all_sources, link_deps)
    self.fp.close()
def WriteSubMake(self, output_filename, makefile_path, targets, build_dir):
"""Write a "sub-project" Makefile.
This is a small, wrapper Makefile that calls the top-level Makefile to build
the targets from a single gyp file (i.e. a sub-project).
Arguments:
output_filename: sub-project Makefile name to write
makefile_path: path to the top-level Makefile
targets: list of "all" targets for this sub-project
build_dir: build output directory, relative to the sub-project
"""
gyp.common.EnsureDirExists(output_filename)
self.fp = open(output_filename, 'w')
self.fp.write(header)
# For consistency with other builders, put sub-project build output in the
# sub-project dir (see test/subdirectory/gyptest-subdir-all.py).
self.WriteLn('export builddir_name ?= %s' %
os.path.join(os.path.dirname(output_filename), build_dir))
self.WriteLn('.PHONY: all')
self.WriteLn('all:')
if makefile_path:
makefile_path = ' -C ' + makefile_path
self.WriteLn('\t$(MAKE)%s %s' % (makefile_path, ' '.join(targets)))
self.fp.close()
  def WriteActions(self, actions, extra_sources, extra_outputs,
                   extra_mac_bundle_resources, part_of_all):
    """Write Makefile code for any 'actions' from the gyp input.

    Arguments:
      actions: list of action dicts from the target spec
      extra_sources: a list that will be filled in with newly generated source
                     files, if any
      extra_outputs: a list that will be filled in with any outputs of these
                     actions (used to make other pieces dependent on these
                     actions)
      extra_mac_bundle_resources: a list that will be filled in with outputs
                     flagged as mac bundle resources
      part_of_all: flag indicating this target is part of 'all'
    """
    env = self.GetSortedXcodeEnv()
    for action in actions:
      # Make-safe variable name unique to this target/action pair.
      name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                                 action['action_name']))
      self.WriteLn('### Rules for action "%s":' % action['action_name'])
      inputs = action['inputs']
      outputs = action['outputs']
      # Build up a list of outputs.
      # Collect the output dirs we'll need.
      dirs = set()
      for out in outputs:
        dir = os.path.split(out)[0]
        if dir:
          dirs.add(dir)
      # NOTE(review): int() coercion — presumably these flags may arrive as
      # '0'/'1' strings from gyp input; verify against the input loader.
      if int(action.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(action.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs
      # Write the actual command.
      action_commands = action['action']
      if self.flavor == 'mac':
        action_commands = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                          for command in action_commands]
      command = gyp.common.EncodePOSIXShellList(action_commands)
      if 'message' in action:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, action['message']))
      else:
        self.WriteLn('quiet_cmd_%s = ACTION %s $@' % (name, name))
      # Create the output directories up front, then run the action.
      if len(dirs) > 0:
        command = 'mkdir -p %s' % ' '.join(dirs) + '; ' + command
      cd_action = 'cd %s; ' % Sourceify(self.path or '.')
      # command and cd_action get written to a toplevel variable called
      # cmd_foo.  Toplevel variables can't handle things that change per
      # makefile like $(TARGET), so hardcode the target.
      command = command.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)
      # Set LD_LIBRARY_PATH in case the action runs an executable from this
      # build which links to shared libs from this build.
      # actions run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn('cmd_%s = LD_LIBRARY_PATH=$(builddir)/lib.host:'
                   '$(builddir)/lib.target:$$LD_LIBRARY_PATH; '
                   'export LD_LIBRARY_PATH; '
                   '%s%s'
                   % (name, cd_action, command))
      self.WriteLn()
      outputs = map(self.Absolutify, outputs)
      # The makefile rules are all relative to the top dir, but the gyp actions
      # are defined relative to their containing dir.  This replaces the obj
      # variable for the action rule with an absolute version so that the output
      # goes in the right place.
      # Only write the 'obj' and 'builddir' rules for the "primary" output (:1);
      # it's superfluous for the "extra outputs", and this avoids accidentally
      # writing duplicate dummy rules for those outputs.
      # Same for environment.
      self.WriteLn("%s: obj := $(abs_obj)" % QuoteSpaces(outputs[0]))
      self.WriteLn("%s: builddir := $(abs_builddir)" % QuoteSpaces(outputs[0]))
      self.WriteSortedXcodeEnv(outputs[0], self.GetSortedXcodeEnv())
      # Spaces in action paths are unsupported (see SPACE_REPLACEMENT above).
      for input in inputs:
        assert ' ' not in input, (
            "Spaces in action input filenames not supported (%s)" % input)
      for output in outputs:
        assert ' ' not in output, (
            "Spaces in action output filenames not supported (%s)" % output)
      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]
      self.WriteDoCmd(outputs, map(Sourceify, map(self.Absolutify, inputs)),
                      part_of_all=part_of_all, command=name)
      # Stuff the outputs in a variable so we can refer to them later.
      outputs_variable = 'action_%s_outputs' % name
      self.WriteLn('%s := %s' % (outputs_variable, ' '.join(outputs)))
      extra_outputs.append('$(%s)' % outputs_variable)
      self.WriteLn()
    self.WriteLn()
def WriteRules(self, rules, extra_sources, extra_outputs,
               extra_mac_bundle_resources, part_of_all):
  """Write Makefile code for any 'rules' from the gyp input.

  For every rule and every source it applies to, emits a cmd_NAME_N
  toplevel make variable plus a rule wiring the expanded outputs to the
  expanded inputs via do_cmd.

  rules: list of rule dicts from the gyp input.
  extra_sources: a list that will be filled in with newly generated source
                 files, if any
  extra_outputs: a list that will be filled in with any outputs of these
                 rules (used to make other pieces dependent on these rules)
  extra_mac_bundle_resources: list filled in with outputs flagged as
                 mac bundle resources
  part_of_all: flag indicating this target is part of 'all'
  """
  env = self.GetSortedXcodeEnv()
  for rule in rules:
    name = StringToMakefileVariable('%s_%s' % (self.qualified_target,
                                               rule['rule_name']))
    count = 0
    self.WriteLn('### Generated for rule %s:' % name)

    all_outputs = []

    for rule_source in rule.get('rule_sources', []):
      # Split the source path so %(INPUT_ROOT)s / %(INPUT_DIRNAME)s style
      # expansions in outputs/actions can be resolved per source file.
      # (rule_source_ext is computed but not referenced below.)
      dirs = set()
      (rule_source_dirname, rule_source_basename) = os.path.split(rule_source)
      (rule_source_root, rule_source_ext) = \
          os.path.splitext(rule_source_basename)

      outputs = [self.ExpandInputRoot(out, rule_source_root,
                                      rule_source_dirname)
                 for out in rule['outputs']]

      # Collect output directories so the action can mkdir -p them first.
      for out in outputs:
        dir = os.path.dirname(out)
        if dir:
          dirs.add(dir)
      if int(rule.get('process_outputs_as_sources', False)):
        extra_sources += outputs
      if int(rule.get('process_outputs_as_mac_bundle_resources', False)):
        extra_mac_bundle_resources += outputs
      inputs = map(Sourceify, map(self.Absolutify, [rule_source] +
                                  rule.get('inputs', [])))
      actions = ['$(call do_cmd,%s_%d)' % (name, count)]

      if name == 'resources_grit':
        # HACK: This is ugly.  Grit intentionally doesn't touch the
        # timestamp of its output file when the file doesn't change,
        # which is fine in hash-based dependency systems like scons
        # and forge, but not kosher in the make world.  After some
        # discussion, hacking around it here seems like the least
        # amount of pain.
        actions += ['@touch --no-create $@']

      # See the comment in WriteCopies about expanding env vars.
      outputs = [gyp.xcode_emulation.ExpandEnvVars(o, env) for o in outputs]
      inputs = [gyp.xcode_emulation.ExpandEnvVars(i, env) for i in inputs]

      outputs = map(self.Absolutify, outputs)
      all_outputs += outputs
      # Only write the 'obj' and 'builddir' rules for the "primary" output
      # (:1); it's superfluous for the "extra outputs", and this avoids
      # accidentally writing duplicate dummy rules for those outputs.
      self.WriteLn('%s: obj := $(abs_obj)' % outputs[0])
      self.WriteLn('%s: builddir := $(abs_builddir)' % outputs[0])
      self.WriteMakeRule(outputs, inputs, actions,
                         command="%s_%d" % (name, count))
      # Spaces in rule filenames are not supported, but rule variables have
      # spaces in them (e.g. RULE_INPUT_PATH expands to '$(abspath $<)').
      # The spaces within the variables are valid, so remove the variables
      # before checking.
      variables_with_spaces = re.compile(r'\$\([^ ]* \$<\)')
      for output in outputs:
        output = re.sub(variables_with_spaces, '', output)
        assert ' ' not in output, (
            "Spaces in rule filenames not yet supported (%s)" % output)
      self.WriteLn('all_deps += %s' % ' '.join(outputs))

      action = [self.ExpandInputRoot(ac, rule_source_root,
                                     rule_source_dirname)
                for ac in rule['action']]
      mkdirs = ''
      if len(dirs) > 0:
        mkdirs = 'mkdir -p %s; ' % ' '.join(dirs)
      cd_action = 'cd %s; ' % Sourceify(self.path or '.')

      # action, cd_action, and mkdirs get written to a toplevel variable
      # called cmd_foo. Toplevel variables can't handle things that change
      # per makefile like $(TARGET), so hardcode the target.
      if self.flavor == 'mac':
        action = [gyp.xcode_emulation.ExpandEnvVars(command, env)
                  for command in action]
      action = gyp.common.EncodePOSIXShellList(action)
      action = action.replace('$(TARGET)', self.target)
      cd_action = cd_action.replace('$(TARGET)', self.target)
      mkdirs = mkdirs.replace('$(TARGET)', self.target)

      # Set LD_LIBRARY_PATH in case the rule runs an executable from this
      # build which links to shared libs from this build.
      # rules run on the host, so they should in theory only use host
      # libraries, but until everything is made cross-compile safe, also use
      # target libraries.
      # TODO(piman): when everything is cross-compile safe, remove lib.target
      self.WriteLn(
          "cmd_%(name)s_%(count)d = LD_LIBRARY_PATH="
          "$(builddir)/lib.host:$(builddir)/lib.target:$$LD_LIBRARY_PATH; "
          "export LD_LIBRARY_PATH; "
          "%(cd_action)s%(mkdirs)s%(action)s" % {
            'action': action,
            'cd_action': cd_action,
            'count': count,
            'mkdirs': mkdirs,
            'name': name,
          })
      self.WriteLn(
          'quiet_cmd_%(name)s_%(count)d = RULE %(name)s_%(count)d $@' % {
            'count': count,
            'name': name,
          })
      self.WriteLn()
      count += 1

    # Stuff all this rule's outputs into a variable other targets can
    # depend on.
    outputs_variable = 'rule_%s_outputs' % name
    self.WriteList(all_outputs, outputs_variable)
    extra_outputs.append('$(%s)' % outputs_variable)

    self.WriteLn('### Finished generating for rule: %s' % name)
    self.WriteLn()
  self.WriteLn('### Finished generating for all rules')
  self.WriteLn('')
def WriteCopies(self, copies, extra_outputs, part_of_all):
  """Write Makefile code for any 'copies' from the gyp input.

  extra_outputs: a list that will be filled in with any outputs of this action
                 (used to make other pieces dependent on this action)
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Generated for copy rule.')

  variable = StringToMakefileVariable(self.qualified_target + '_copies')
  copied = []
  for copy_entry in copies:
    for src in copy_entry['files']:
      # Absolutify() may call normpath, and will strip trailing slashes.
      src = Sourceify(self.Absolutify(src))
      basename = os.path.split(src)[1]
      dest = Sourceify(self.Absolutify(os.path.join(
          copy_entry['destination'], basename)))

      # Destination paths routinely contain env-style variables.  Writing
      # the environment as target-local make variables doesn't work here
      # (the variables are already needed for the target name itself), and
      # global make variables would leave spaces in .d files after
      # expansion, which .d handling cannot cope with.  Since 'copies'
      # can't run scripts, the workaround is to expand the variables at
      # gyp time; WriteDoCmd() escapes spaces for .d files.
      env = self.GetSortedXcodeEnv()
      dest = gyp.xcode_emulation.ExpandEnvVars(dest, env)
      src = gyp.xcode_emulation.ExpandEnvVars(src, env)
      self.WriteDoCmd([dest], [src], 'copy', part_of_all)
      copied.append(dest)
  self.WriteLn('%s = %s' % (variable, ' '.join(map(QuoteSpaces, copied))))
  extra_outputs.append('$(%s)' % variable)
  self.WriteLn()
def WriteMacBundleResources(self, resources, bundle_deps):
  """Writes Makefile code for 'mac_bundle_resources'."""
  self.WriteLn('### Generated for mac_bundle_resources')

  prepared = map(Sourceify, map(self.Absolutify, resources))
  for dest, res in gyp.xcode_emulation.GetMacBundleResources(
      generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
      prepared):
    # Make does not supports '.xcassets' emulation; skip those outputs.
    if os.path.splitext(dest)[1] == '.xcassets':
      continue
    self.WriteDoCmd([dest], [res], 'mac_tool,,,copy-bundle-resource',
                    part_of_all=True)
    bundle_deps.append(dest)
def WriteMacInfoPlist(self, bundle_deps):
  """Write Makefile code for bundle Info.plist files.

  When the plist needs preprocessing (INFOPLIST_DEFINES present), an
  intermediate preprocessed copy is produced first; the final plist is
  copied into the bundle via mac_tool and appended to bundle_deps.
  """
  info_plist, out, defines, extra_env = gyp.xcode_emulation.GetMacInfoPlist(
      generator_default_variables['PRODUCT_DIR'], self.xcode_settings,
      lambda p: Sourceify(self.Absolutify(p)))
  if not info_plist:
    return
  if defines:
    # Create an intermediate file to store preprocessed results.
    intermediate_plist = ('$(obj).$(TOOLSET)/$(TARGET)/' +
                          os.path.basename(info_plist))
    self.WriteList(defines, intermediate_plist + ': INFOPLIST_DEFINES', '-D',
                   quoter=EscapeCppDefine)
    self.WriteMakeRule([intermediate_plist], [info_plist],
                       ['$(call do_cmd,infoplist)',
                        # "Convert" the plist so that any weird whitespace
                        # changes from the preprocessor do not affect the
                        # XML parser in mac_tool.
                        '@plutil -convert xml1 $@ $@'])
    info_plist = intermediate_plist
  # plists can contain envvars and substitute them into the file.
  self.WriteSortedXcodeEnv(
      out, self.GetSortedXcodeEnv(additional_settings=extra_env))
  self.WriteDoCmd([out], [info_plist], 'mac_tool,,,copy-info-plist',
                  part_of_all=True)
  bundle_deps.append(out)
def WriteSources(self, configs, deps, sources,
                 extra_outputs, extra_link_deps,
                 part_of_all, precompiled_header):
  """Write Makefile code for any 'sources' from the gyp input.
  These are source files necessary to build the current target.

  Emits per-configuration DEFS/CFLAGS/INCS variables, the OBJS list,
  ordering rules against deps and extra_outputs, precompiled header
  dependencies, and the target-local GYP_*FLAGS overrides.

  configs, deps, sources: input from gyp.
  extra_outputs: a list of extra outputs this action should be dependent on;
                 used to serialize action/rules before compilation
  extra_link_deps: a list that will be filled in with any outputs of
                   compilation (to be used in link lines)
  part_of_all: flag indicating this target is part of 'all'
  precompiled_header: helper object for gch handling (provides
                      GetObjDependencies/GetInclude/GetPchBuildCommands)
  """
  # Write configuration-specific variables for CFLAGS, etc.
  for configname in sorted(configs.keys()):
    config = configs[configname]
    self.WriteList(config.get('defines'), 'DEFS_%s' % configname, prefix='-D',
                   quoter=EscapeCppDefine)

    # On mac the flags come from xcode_settings emulation; elsewhere they
    # are taken verbatim from the gyp config.
    if self.flavor == 'mac':
      cflags = self.xcode_settings.GetCflags(configname)
      cflags_c = self.xcode_settings.GetCflagsC(configname)
      cflags_cc = self.xcode_settings.GetCflagsCC(configname)
      cflags_objc = self.xcode_settings.GetCflagsObjC(configname)
      cflags_objcc = self.xcode_settings.GetCflagsObjCC(configname)
    else:
      cflags = config.get('cflags')
      cflags_c = config.get('cflags_c')
      cflags_cc = config.get('cflags_cc')

    self.WriteLn("# Flags passed to all source files.");
    self.WriteList(cflags, 'CFLAGS_%s' % configname)
    self.WriteLn("# Flags passed to only C files.");
    self.WriteList(cflags_c, 'CFLAGS_C_%s' % configname)
    self.WriteLn("# Flags passed to only C++ files.");
    self.WriteList(cflags_cc, 'CFLAGS_CC_%s' % configname)
    if self.flavor == 'mac':
      self.WriteLn("# Flags passed to only ObjC files.");
      self.WriteList(cflags_objc, 'CFLAGS_OBJC_%s' % configname)
      self.WriteLn("# Flags passed to only ObjC++ files.");
      self.WriteList(cflags_objcc, 'CFLAGS_OBJCC_%s' % configname)
    includes = config.get('include_dirs')
    if includes:
      includes = map(Sourceify, map(self.Absolutify, includes))
    self.WriteList(includes, 'INCS_%s' % configname, prefix='-I')

  compilable = filter(Compilable, sources)
  objs = map(self.Objectify, map(self.Absolutify, map(Target, compilable)))
  self.WriteList(objs, 'OBJS')

  for obj in objs:
    assert ' ' not in obj, (
        "Spaces in object filenames not supported (%s)" % obj)
  self.WriteLn('# Add to the list of files we specially track '
               'dependencies for.')
  self.WriteLn('all_deps += $(OBJS)')
  self.WriteLn()

  # Make sure our dependencies are built first.
  if deps:
    self.WriteMakeRule(['$(OBJS)'], deps,
                       comment = 'Make sure our dependencies are built '
                                 'before any of us.',
                       order_only = True)

  # Make sure the actions and rules run first.
  # If they generate any extra headers etc., the per-.o file dep tracking
  # will catch the proper rebuilds, so order only is still ok here.
  if extra_outputs:
    self.WriteMakeRule(['$(OBJS)'], extra_outputs,
                       comment = 'Make sure our actions/rules run '
                                 'before any of us.',
                       order_only = True)

  pchdeps = precompiled_header.GetObjDependencies(compilable, objs )
  if pchdeps:
    self.WriteLn('# Dependencies from obj files to their precompiled headers')
    for source, obj, gch in pchdeps:
      self.WriteLn('%s: %s' % (obj, gch))
    self.WriteLn('# End precompiled header dependencies')

  if objs:
    extra_link_deps.append('$(OBJS)')
    self.WriteLn("""\
# CFLAGS et al overrides must be target-local.
# See "Target-specific Variable Values" in the GNU Make manual.""")
    self.WriteLn("$(OBJS): TOOLSET := $(TOOLSET)")
    self.WriteLn("$(OBJS): GYP_CFLAGS := "
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "%s " % precompiled_header.GetInclude('c') +
                 "$(CFLAGS_$(BUILDTYPE)) "
                 "$(CFLAGS_C_$(BUILDTYPE))")
    self.WriteLn("$(OBJS): GYP_CXXFLAGS := "
                 "$(DEFS_$(BUILDTYPE)) "
                 "$(INCS_$(BUILDTYPE)) "
                 "%s " % precompiled_header.GetInclude('cc') +
                 "$(CFLAGS_$(BUILDTYPE)) "
                 "$(CFLAGS_CC_$(BUILDTYPE))")
    if self.flavor == 'mac':
      self.WriteLn("$(OBJS): GYP_OBJCFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('m') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_C_$(BUILDTYPE)) "
                   "$(CFLAGS_OBJC_$(BUILDTYPE))")
      self.WriteLn("$(OBJS): GYP_OBJCXXFLAGS := "
                   "$(DEFS_$(BUILDTYPE)) "
                   "$(INCS_$(BUILDTYPE)) "
                   "%s " % precompiled_header.GetInclude('mm') +
                   "$(CFLAGS_$(BUILDTYPE)) "
                   "$(CFLAGS_CC_$(BUILDTYPE)) "
                   "$(CFLAGS_OBJCC_$(BUILDTYPE))")

  self.WritePchTargets(precompiled_header.GetPchBuildCommands())

  # If there are any object files in our input file list, link them into our
  # output.
  extra_link_deps += filter(Linkable, sources)

  self.WriteLn()
def WritePchTargets(self, pch_commands):
  """Writes make rules to compile prefix headers."""
  if not pch_commands:
    return

  # Per-language flags appended after the generic configuration flags.
  extra_flags_for_lang = {
    'c': '$(CFLAGS_C_$(BUILDTYPE))',
    'cc': '$(CFLAGS_CC_$(BUILDTYPE))',
    'm': '$(CFLAGS_C_$(BUILDTYPE)) $(CFLAGS_OBJC_$(BUILDTYPE))',
    'mm': '$(CFLAGS_CC_$(BUILDTYPE)) $(CFLAGS_OBJCC_$(BUILDTYPE))',
  }
  # Target-local variable name carrying the pch flags for each language.
  flags_var_for_lang = {
    'c': 'GYP_PCH_CFLAGS',
    'cc': 'GYP_PCH_CXXFLAGS',
    'm': 'GYP_PCH_OBJCFLAGS',
    'mm': 'GYP_PCH_OBJCXXFLAGS',
  }

  for gch, lang_flag, lang, header_input in pch_commands:
    self.WriteLn('%s: %s := %s $(DEFS_$(BUILDTYPE)) $(INCS_$(BUILDTYPE)) '
                 '$(CFLAGS_$(BUILDTYPE)) %s'
                 % (gch, flags_var_for_lang[lang], lang_flag,
                    extra_flags_for_lang[lang]))

    self.WriteLn('%s: %s FORCE_DO_CMD' % (gch, header_input))
    self.WriteLn('\t@$(call do_cmd,pch_%s,1)' % lang)
    self.WriteLn('')
    assert ' ' not in gch, (
        "Spaces in gch filenames not supported (%s)" % gch)
    self.WriteLn('all_deps += %s' % gch)
    self.WriteLn('')
def ComputeOutputBasename(self, spec):
  """Return the 'output basename' of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  'libfoobar.so'

  spec: the gyp target dict ('target_name', optional 'product_prefix',
        'product_name', 'product_extension').
  """
  assert not self.is_mac_bundle
  if self.flavor == 'mac' and self.type in (
      'static_library', 'executable', 'shared_library', 'loadable_module'):
    # On mac, naming is delegated entirely to the Xcode emulation.
    return self.xcode_settings.GetExecutablePath()

  target = spec['target_name']
  target_prefix = ''
  target_ext = ''
  if self.type == 'static_library':
    if target[:3] == 'lib':
      target = target[3:]
    target_prefix = 'lib'
    target_ext = '.a'
  elif self.type in ('loadable_module', 'shared_library'):
    if target[:3] == 'lib':
      target = target[3:]
    target_prefix = 'lib'
    target_ext = '.so'
  elif self.type == 'none':
    # 'none' targets get a stamp file so make has something to touch.
    target = '%s.stamp' % target
  elif self.type != 'executable':
    # BUG FIX: the original used print with a parenthesized, comma-separated
    # argument list, which under the Python 2 print *statement* prints a
    # tuple repr.  Format a single string instead; this form prints the same
    # readable message under both Python 2 and Python 3.
    print('ERROR: What output file should be generated? type %s target %s'
          % (self.type, target))

  # The spec may override each naming component.
  target_prefix = spec.get('product_prefix', target_prefix)
  target = spec.get('product_name', target)
  product_ext = spec.get('product_extension')
  if product_ext:
    target_ext = '.' + product_ext

  return target_prefix + target + target_ext
def _InstallImmediately(self):
return self.toolset == 'target' and self.flavor == 'mac' and self.type in (
'static_library', 'executable', 'shared_library', 'loadable_module')
def ComputeOutput(self, spec):
  """Return the 'output' (full output path) of a gyp spec.

  E.g., the loadable module 'foobar' in directory 'baz' will produce
  '$(obj)/baz/libfoobar.so'
  """
  assert not self.is_mac_bundle

  # Executables (and anything installed immediately) land in the build
  # directory; everything else in the per-toolset object directory.
  if self.type == 'executable' or self._InstallImmediately():
    out_dir = '$(builddir)'
  else:
    out_dir = os.path.join('$(obj).' + self.toolset, self.path)
  # The spec may redirect the output into a custom directory.
  out_dir = spec.get('product_dir', out_dir)
  return os.path.join(out_dir, self.ComputeOutputBasename(spec))
def ComputeMacBundleOutput(self, spec):
  """Return the 'output' (full output path) to a bundle output directory."""
  assert self.is_mac_bundle
  product_dir = generator_default_variables['PRODUCT_DIR']
  return os.path.join(product_dir, self.xcode_settings.GetWrapperName())
def ComputeMacBundleBinaryOutput(self, spec):
  """Return the 'output' (full output path) to the binary in a bundle."""
  product_dir = generator_default_variables['PRODUCT_DIR']
  return os.path.join(product_dir, self.xcode_settings.GetExecutablePath())
def ComputeDeps(self, spec):
  """Compute the dependencies of a gyp spec.

  Returns a tuple (deps, link_deps), where each is a list of
  filenames that will need to be put in front of make for either
  building (deps) or linking (link_deps).
  """
  deps = []
  link_deps = []
  if 'dependencies' in spec:
    dependencies = spec['dependencies']
    for dep in dependencies:
      dep_output = target_outputs[dep]
      if dep_output:
        deps.append(dep_output)
    link_deps = [target_link_deps[dep] for dep in dependencies
                 if dep in target_link_deps]
    # Anything we link against is also a build-order dependency.
    deps.extend(link_deps)
  # TODO: It seems we need to transitively link in libraries (e.g. -lfoo)?
  # This hack makes it work:
  # link_deps.extend(spec.get('libraries', []))
  return (gyp.common.uniquer(deps), gyp.common.uniquer(link_deps))
def WriteDependencyOnExtraOutputs(self, target, extra_outputs):
  # Make self.output_binary order-only depend on extra_outputs, so the
  # special outputs (from actions/rules) exist before the binary is built.
  # NOTE(review): the 'target' parameter is not used; the rule is always
  # written against self.output_binary.
  self.WriteMakeRule([self.output_binary], extra_outputs,
                     comment = 'Build our special outputs first.',
                     order_only = True)
def WriteTarget(self, spec, configs, deps, link_deps, bundle_deps,
                extra_outputs, part_of_all):
  """Write Makefile code to produce the final target of the gyp spec.

  Emits per-config LDFLAGS/LIBS variables, postbuild wiring, the link
  (or alink/solink/touch) rule for this target's type, plus target and
  'all' aliases and installable-target copy rules.

  spec, configs: input from gyp.
  deps, link_deps: dependency lists; see ComputeDeps()
  bundle_deps: dependencies of a mac bundle (resources, binary, plist)
  extra_outputs: any extra outputs that our target should depend on
  part_of_all: flag indicating this target is part of 'all'
  """
  self.WriteLn('### Rules for final target.')

  if extra_outputs:
    self.WriteDependencyOnExtraOutputs(self.output_binary, extra_outputs)
    self.WriteMakeRule(extra_outputs, deps,
                       comment=('Preserve order dependency of '
                                'special output on deps.'),
                       order_only = True)

  # --- Per-configuration link flags -----------------------------------
  target_postbuilds = {}
  if self.type != 'none':
    for configname in sorted(configs.keys()):
      config = configs[configname]
      if self.flavor == 'mac':
        ldflags = self.xcode_settings.GetLdflags(configname,
            generator_default_variables['PRODUCT_DIR'],
            lambda p: Sourceify(self.Absolutify(p)))

        # TARGET_POSTBUILDS_$(BUILDTYPE) is added to postbuilds later on.
        gyp_to_build = gyp.common.InvertRelativePath(self.path)
        target_postbuild = self.xcode_settings.AddImplicitPostbuilds(
            configname,
            QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                      self.output))),
            QuoteSpaces(os.path.normpath(os.path.join(gyp_to_build,
                                                      self.output_binary))))
        if target_postbuild:
          target_postbuilds[configname] = target_postbuild
      else:
        ldflags = config.get('ldflags', [])
        # Compute an rpath for this output if needed.
        if any(dep.endswith('.so') or '.so.' in dep for dep in deps):
          # We want to get the literal string "$ORIGIN" into the link command,
          # so we need lots of escaping.
          ldflags.append(r'-Wl,-rpath=\$$ORIGIN/lib.%s/' % self.toolset)
          ldflags.append(r'-Wl,-rpath-link=\$(builddir)/lib.%s/' %
                         self.toolset)
      library_dirs = config.get('library_dirs', [])
      ldflags += [('-L%s' % library_dir) for library_dir in library_dirs]
      self.WriteList(ldflags, 'LDFLAGS_%s' % configname)
      if self.flavor == 'mac':
        self.WriteList(self.xcode_settings.GetLibtoolflags(configname),
                       'LIBTOOLFLAGS_%s' % configname)
    libraries = spec.get('libraries')
    if libraries:
      # Remove duplicate entries
      libraries = gyp.common.uniquer(libraries)
      if self.flavor == 'mac':
        libraries = self.xcode_settings.AdjustLibraries(libraries)
    self.WriteList(libraries, 'LIBS')
    self.WriteLn('%s: GYP_LDFLAGS := $(LDFLAGS_$(BUILDTYPE))' %
                 QuoteSpaces(self.output_binary))
    self.WriteLn('%s: LIBS := $(LIBS)' % QuoteSpaces(self.output_binary))

    if self.flavor == 'mac':
      self.WriteLn('%s: GYP_LIBTOOLFLAGS := $(LIBTOOLFLAGS_$(BUILDTYPE))' %
                   QuoteSpaces(self.output_binary))

  # --- Postbuild actions ----------------------------------------------
  # Postbuild actions. Like actions, but implicitly depend on the target's
  # output.
  postbuilds = []
  if self.flavor == 'mac':
    if target_postbuilds:
      postbuilds.append('$(TARGET_POSTBUILDS_$(BUILDTYPE))')
    postbuilds.extend(
        gyp.xcode_emulation.GetSpecPostbuildCommands(spec))

  if postbuilds:
    # Envvars may be referenced by TARGET_POSTBUILDS_$(BUILDTYPE),
    # so we must output its definition first, since we declare variables
    # using ":=".
    self.WriteSortedXcodeEnv(self.output, self.GetSortedXcodePostbuildEnv())

    for configname in target_postbuilds:
      self.WriteLn('%s: TARGET_POSTBUILDS_%s := %s' %
          (QuoteSpaces(self.output),
           configname,
           gyp.common.EncodePOSIXShellList(target_postbuilds[configname])))

    # Postbuilds expect to be run in the gyp file's directory, so insert an
    # implicit postbuild to cd to there.
    postbuilds.insert(0, gyp.common.EncodePOSIXShellList(['cd', self.path]))
    for i in xrange(len(postbuilds)):
      if not postbuilds[i].startswith('$'):
        postbuilds[i] = EscapeShellArgument(postbuilds[i])
    self.WriteLn('%s: builddir := $(abs_builddir)' % QuoteSpaces(self.output))
    self.WriteLn('%s: POSTBUILDS := %s' % (
        QuoteSpaces(self.output), ' '.join(postbuilds)))

  # --- Mac bundle packaging -------------------------------------------
  # A bundle directory depends on its dependencies such as bundle resources
  # and bundle binary. When all dependencies have been built, the bundle
  # needs to be packaged.
  if self.is_mac_bundle:
    # If the framework doesn't contain a binary, then nothing depends
    # on the actions -- make the framework depend on them directly too.
    self.WriteDependencyOnExtraOutputs(self.output, extra_outputs)

    # Bundle dependencies. Note that the code below adds actions to this
    # target, so if you move these two lines, move the lines below as well.
    self.WriteList(map(QuoteSpaces, bundle_deps), 'BUNDLE_DEPS')
    self.WriteLn('%s: $(BUNDLE_DEPS)' % QuoteSpaces(self.output))

    # After the framework is built, package it. Needs to happen before
    # postbuilds, since postbuilds depend on this.
    if self.type in ('shared_library', 'loadable_module'):
      self.WriteLn('\t@$(call do_cmd,mac_package_framework,,,%s)' %
                   self.xcode_settings.GetFrameworkVersion())

    # Bundle postbuilds can depend on the whole bundle, so run them after
    # the bundle is packaged, not already after the bundle binary is done.
    if postbuilds:
      self.WriteLn('\t@$(call do_postbuilds)')
      postbuilds = []  # Don't write postbuilds for target's output.

    # Needed by test/mac/gyptest-rebuild.py.
    self.WriteLn('\t@true # No-op, used by tests')

    # Since this target depends on binary and resources which are in
    # nested subfolders, the framework directory will be older than
    # its dependencies usually. To prevent this rule from executing
    # on every build (expensive, especially with postbuilds), expliclity
    # update the time on the framework directory.
    self.WriteLn('\t@touch -c %s' % QuoteSpaces(self.output))

  if postbuilds:
    assert not self.is_mac_bundle, ('Postbuilds for bundles should be done '
                                    'on the bundle, not the binary (target \'%s\')' % self.target)
    assert 'product_dir' not in spec, ('Postbuilds do not work with '
                                       'custom product_dir')

  # --- Per-type link/alink/solink/touch rule --------------------------
  if self.type == 'executable':
    self.WriteLn('%s: LD_INPUTS := %s' % (
        QuoteSpaces(self.output_binary),
        ' '.join(map(QuoteSpaces, link_deps))))
    if self.toolset == 'host' and self.flavor == 'android':
      self.WriteDoCmd([self.output_binary], link_deps, 'link_host',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd([self.output_binary], link_deps, 'link', part_of_all,
                      postbuilds=postbuilds)
  elif self.type == 'static_library':
    for link_dep in link_deps:
      assert ' ' not in link_dep, (
          "Spaces in alink input filenames not supported (%s)" % link_dep)
    if (self.flavor not in ('mac', 'openbsd', 'netbsd', 'win') and not
        self.is_standalone_static_library):
      self.WriteDoCmd([self.output_binary], link_deps, 'alink_thin',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd([self.output_binary], link_deps, 'alink', part_of_all,
                      postbuilds=postbuilds)
  elif self.type == 'shared_library':
    self.WriteLn('%s: LD_INPUTS := %s' % (
        QuoteSpaces(self.output_binary),
        ' '.join(map(QuoteSpaces, link_deps))))
    self.WriteDoCmd([self.output_binary], link_deps, 'solink', part_of_all,
                    postbuilds=postbuilds)
  elif self.type == 'loadable_module':
    for link_dep in link_deps:
      assert ' ' not in link_dep, (
          "Spaces in module input filenames not supported (%s)" % link_dep)
    if self.toolset == 'host' and self.flavor == 'android':
      self.WriteDoCmd([self.output_binary], link_deps, 'solink_module_host',
                      part_of_all, postbuilds=postbuilds)
    else:
      self.WriteDoCmd(
          [self.output_binary], link_deps, 'solink_module', part_of_all,
          postbuilds=postbuilds)
  elif self.type == 'none':
    # Write a stamp line.
    self.WriteDoCmd([self.output_binary], deps, 'touch', part_of_all,
                    postbuilds=postbuilds)
  else:
    # NOTE(review): 'target' is not defined anywhere in this method's scope,
    # so reaching this fallback raises NameError instead of printing the
    # warning.  Looks like it should be self.target -- confirm before fixing.
    print "WARNING: no output for", self.type, target

  # --- Aliases and installable-target rules ---------------------------
  # Add an alias for each target (if there are any outputs).
  # Installable target aliases are created below.
  if ((self.output and self.output != self.target) and
      (self.type not in self._INSTALLABLE_TARGETS)):
    self.WriteMakeRule([self.target], [self.output],
                       comment='Add target alias', phony = True)
    if part_of_all:
      self.WriteMakeRule(['all'], [self.target],
                         comment = 'Add target alias to "all" target.',
                         phony = True)

  # Add special-case rules for our installable targets.
  # 1) They need to install to the build dir or "product" dir.
  # 2) They get shortcuts for building (e.g. "make chrome").
  # 3) They are part of "make all".
  if (self.type in self._INSTALLABLE_TARGETS or
      self.is_standalone_static_library):
    if self.type == 'shared_library':
      file_desc = 'shared library'
    elif self.type == 'static_library':
      file_desc = 'static library'
    else:
      file_desc = 'executable'
    install_path = self._InstallableTargetInstallPath()
    installable_deps = [self.output]
    if (self.flavor == 'mac' and not 'product_dir' in spec and
        self.toolset == 'target'):
      # On mac, products are created in install_path immediately.
      assert install_path == self.output, '%s != %s' % (
          install_path, self.output)

    # Point the target alias to the final binary output.
    self.WriteMakeRule([self.target], [install_path],
                       comment='Add target alias', phony = True)
    if install_path != self.output:
      assert not self.is_mac_bundle  # See comment a few lines above.
      self.WriteDoCmd([install_path], [self.output], 'copy',
                      comment = 'Copy this to the %s output path.' %
                      file_desc, part_of_all=part_of_all)
      installable_deps.append(install_path)
    if self.output != self.alias and self.alias != self.target:
      self.WriteMakeRule([self.alias], installable_deps,
                         comment = 'Short alias for building this %s.' %
                         file_desc, phony = True)
    if part_of_all:
      self.WriteMakeRule(['all'], [install_path],
                         comment = 'Add %s to "all" target.' % file_desc,
                         phony = True)
def WriteList(self, value_list, variable=None, prefix='',
              quoter=QuoteIfNecessary):
  """Write a variable definition that is a list of values.

  E.g. WriteList(['a','b'], 'foo', prefix='blah') writes out
       foo = blaha blahb
  but in a pretty-printed style.
  """
  if value_list:
    quoted = [quoter(prefix + item) for item in value_list]
    values = ' \\\n\t' + ' \\\n\t'.join(quoted)
  else:
    # An empty list still produces a (valueless) variable definition.
    values = ''
  self.fp.write('%s :=%s\n\n' % (variable, values))
def WriteDoCmd(self, outputs, inputs, command, part_of_all, comment=None,
               postbuilds=False):
  """Write a Makefile rule that uses do_cmd.

  This makes the outputs dependent on the command line that was run,
  as well as support the V= make command line flag.
  """
  if postbuilds:
    assert ',' not in command
    suffix = ',,1'  # Tell do_cmd to honor $POSTBUILDS
  else:
    suffix = ''
  self.WriteMakeRule(outputs, inputs,
                     actions = ['$(call do_cmd,%s%s)' % (command, suffix)],
                     comment = comment,
                     command = command,
                     force = True)
  # Add our outputs to the list of targets we read depfiles from.
  # all_deps is only used for deps file reading; spaces are replaced with
  # SPACE_REPLACEMENT there because escaping doesn't work with make's
  # $(sort) and other functions.
  quoted_outputs = [QuoteSpaces(o, SPACE_REPLACEMENT) for o in outputs]
  self.WriteLn('all_deps += %s' % ' '.join(quoted_outputs))
def WriteMakeRule(self, outputs, inputs, actions=None, comment=None,
                  order_only=False, force=False, phony=False, command=None):
  """Write a Makefile rule, with some extra tricks.

  outputs: a list of outputs for the rule (note: this is not directly
           supported by make; see comments below)
  inputs: a list of inputs for the rule
  actions: a list of shell commands to run for the rule; NOTE: mutated in
           place in the multi-output case (a touch command is prepended)
  comment: a comment to put in the Makefile above the rule (also useful
           for making this Python script's code self-documenting)
  order_only: if true, makes the dependency order-only
  force: if true, include FORCE_DO_CMD as an order-only dep
  phony: if true, the rule does not actually generate the named output, the
         output is just a name to run the rule
  command: (optional) command name to generate unambiguous labels
  """
  outputs = map(QuoteSpaces, outputs)
  inputs = map(QuoteSpaces, inputs)

  if comment:
    self.WriteLn('# ' + comment)
  if phony:
    self.WriteLn('.PHONY: ' + ' '.join(outputs))
  if actions:
    # TOOLSET must be target-local so do_cmd picks the right flags.
    self.WriteLn("%s: TOOLSET := $(TOOLSET)" % outputs[0])
  force_append = ' FORCE_DO_CMD' if force else ''

  if order_only:
    # Order only rule: Just write a simple rule.
    # TODO(evanm): just make order_only a list of deps instead of this hack.
    self.WriteLn('%s: | %s%s' %
                 (' '.join(outputs), ' '.join(inputs), force_append))
  elif len(outputs) == 1:
    # Regular rule, one output: Just write a simple rule.
    self.WriteLn('%s: %s%s' % (outputs[0], ' '.join(inputs), force_append))
  else:
    # Regular rule, more than one output: Multiple outputs are tricky in
    # make. We will write three rules:
    # - All outputs depend on an intermediate file.
    # - Make .INTERMEDIATE depend on the intermediate.
    # - The intermediate file depends on the inputs and executes the
    #   actual command.
    # - The intermediate recipe will 'touch' the intermediate file.
    # - The multi-output rule will have an do-nothing recipe.
    intermediate = "%s.intermediate" % (command if command else self.target)
    self.WriteLn('%s: %s' % (' '.join(outputs), intermediate))
    self.WriteLn('\t%s' % '@:');
    self.WriteLn('%s: %s' % ('.INTERMEDIATE', intermediate))
    self.WriteLn('%s: %s%s' %
                 (intermediate, ' '.join(inputs), force_append))
    # NOTE(review): this assumes actions is a non-None list in the
    # multi-output case (insert would raise on None) -- confirm callers.
    actions.insert(0, '$(call do_cmd,touch)')

  if actions:
    for action in actions:
      self.WriteLn('\t%s' % action)
  self.WriteLn()
def WriteAndroidNdkModuleRule(self, module_name, all_sources, link_deps):
  """Write a set of LOCAL_XXX definitions for Android NDK.

  These variable definitions will be used by Android NDK but do nothing for
  non-Android applications.

  Arguments:
    module_name: Android NDK module name, which must be unique among all
                 module names.
    all_sources: A list of source files (will be filtered by Compilable).
    link_deps: A list of link dependencies, which must be sorted in
               the order from dependencies to dependents.
  """
  # Only these target types map onto NDK build modules.
  if self.type not in ('executable', 'shared_library', 'static_library'):
    return

  self.WriteLn('# Variable definitions for Android applications')
  self.WriteLn('include $(CLEAR_VARS)')
  self.WriteLn('LOCAL_MODULE := ' + module_name)
  self.WriteLn('LOCAL_CFLAGS := $(CFLAGS_$(BUILDTYPE)) '
               '$(DEFS_$(BUILDTYPE)) '
               # LOCAL_CFLAGS is applied to both of C and C++.  There is
               # no way to specify $(CFLAGS_C_$(BUILDTYPE)) only for C
               # sources.
               '$(CFLAGS_C_$(BUILDTYPE)) '
               # $(INCS_$(BUILDTYPE)) includes the prefix '-I' while
               # LOCAL_C_INCLUDES does not expect it.  So put it in
               # LOCAL_CFLAGS.
               '$(INCS_$(BUILDTYPE))')
  # LOCAL_CXXFLAGS is obsolete and LOCAL_CPPFLAGS is preferred.
  self.WriteLn('LOCAL_CPPFLAGS := $(CFLAGS_CC_$(BUILDTYPE))')
  self.WriteLn('LOCAL_C_INCLUDES :=')
  self.WriteLn('LOCAL_LDLIBS := $(LDFLAGS_$(BUILDTYPE)) $(LIBS)')

  # Detect the C++ extension: pick whichever of .cc/.cpp/.cxx occurs most
  # often among the sources (ties keep the earlier winner; default .cpp).
  cpp_ext = {'.cc': 0, '.cpp': 0, '.cxx': 0}
  default_cpp_ext = '.cpp'
  for filename in all_sources:
    ext = os.path.splitext(filename)[1]
    if ext in cpp_ext:
      cpp_ext[ext] += 1
      if cpp_ext[ext] > cpp_ext[default_cpp_ext]:
        default_cpp_ext = ext
  self.WriteLn('LOCAL_CPP_EXTENSION := ' + default_cpp_ext)

  self.WriteList(map(self.Absolutify, filter(Compilable, all_sources)),
                 'LOCAL_SRC_FILES')

  # Filter out those which do not match prefix and suffix and produce
  # the resulting list without prefix and suffix.
  def DepsToModules(deps, prefix, suffix):
    modules = []
    for filepath in deps:
      filename = os.path.basename(filepath)
      if filename.startswith(prefix) and filename.endswith(suffix):
        modules.append(filename[len(prefix):-len(suffix)])
    return modules

  # Retrieve the default value of 'SHARED_LIB_SUFFIX'
  params = {'flavor': 'linux'}
  default_variables = {}
  CalculateVariables(default_variables, params)

  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['SHARED_LIB_PREFIX'],
                    default_variables['SHARED_LIB_SUFFIX']),
      'LOCAL_SHARED_LIBRARIES')
  self.WriteList(
      DepsToModules(link_deps,
                    generator_default_variables['STATIC_LIB_PREFIX'],
                    generator_default_variables['STATIC_LIB_SUFFIX']),
      'LOCAL_STATIC_LIBRARIES')

  if self.type == 'executable':
    self.WriteLn('include $(BUILD_EXECUTABLE)')
  elif self.type == 'shared_library':
    self.WriteLn('include $(BUILD_SHARED_LIBRARY)')
  elif self.type == 'static_library':
    self.WriteLn('include $(BUILD_STATIC_LIBRARY)')
  self.WriteLn()
def WriteLn(self, text=''):
  """Write *text* to the output makefile, followed by a newline."""
  self.fp.write('%s\n' % text)
def GetSortedXcodeEnv(self, additional_settings=None):
    """Return the Xcode build environment as sorted (key, value) pairs,
    evaluated relative to the make build and source directories."""
    return gyp.xcode_emulation.GetSortedXcodeEnv(
        self.xcode_settings, "$(abs_builddir)",
        os.path.join("$(abs_srcdir)", self.path), "$(BUILDTYPE)",
        additional_settings)
def GetSortedXcodePostbuildEnv(self):
    """Like GetSortedXcodeEnv, but with the postbuild-only
    CHROMIUM_STRIP_SAVE_FILE setting forced into the environment."""
    # CHROMIUM_STRIP_SAVE_FILE is a chromium-specific hack.
    # TODO(thakis): It would be nice to have some general mechanism instead.
    strip_save_file = self.xcode_settings.GetPerTargetSetting(
        'CHROMIUM_STRIP_SAVE_FILE', '')
    # Even if strip_save_file is empty, explicitly write it. Else a postbuild
    # might pick up an export from an earlier target.
    return self.GetSortedXcodeEnv(
        additional_settings={'CHROMIUM_STRIP_SAVE_FILE': strip_save_file})
def WriteSortedXcodeEnv(self, target, env):
    """Emit one per-target 'export KEY := VALUE' line for each entry of *env*."""
    for k, v in env:
        # For
        #   foo := a\ b
        # the escaped space does the right thing. For
        #   export foo := a\ b
        # it does not -- the backslash is written to the env as literal character.
        # So don't escape spaces in |env[k]|.
        self.WriteLn('%s: export %s := %s' % (QuoteSpaces(target), k, v))
def Objectify(self, path):
    """Return *path* rewritten into its per-toolset object-directory form.

    A '$(obj)/' prefix is retargeted to the toolset-specific object tree;
    paths not already under '$(obj)' are placed beneath it.
    """
    toolset_obj = '$(obj).%s/$(TARGET)' % self.toolset
    if '$(' in path:
        path = path.replace('$(obj)/', toolset_obj + '/')
    if '$(obj)' not in path:
        path = '%s/%s' % (toolset_obj, path)
    return path
def Pchify(self, path, lang):
    """Convert a prefix header path to its output directory form."""
    path = self.Absolutify(path)
    if '$(' in path:
        # Variable-bearing paths: retarget the object dir, per language.
        path = path.replace('$(obj)/', '$(obj).%s/$(TARGET)/pch-%s' %
                            (self.toolset, lang))
        return path
    # Plain paths land under the per-toolset, per-language pch directory.
    return '$(obj).%s/$(TARGET)/pch-%s/%s' % (self.toolset, lang, path)
def Absolutify(self, path):
    """Convert a subdirectory-relative path into a base-relative path.

    Paths containing make variables ('$(') are returned almost untouched:
    normpath could collapse '..' too aggressively around an unexpanded
    variable, so only trailing slashes are stripped for those.
    """
    if '$(' not in path:
        return os.path.normpath(os.path.join(self.path, path))
    return path.rstrip('/')
def ExpandInputRoot(self, template, expansion, dirname):
    """Substitute the INPUT_ROOT / INPUT_DIRNAME placeholders in *template*.

    Templates containing neither placeholder are returned untouched so
    '%'-formatting rules are never applied to them.
    """
    placeholders = ('%(INPUT_ROOT)s', '%(INPUT_DIRNAME)s')
    if any(token in template for token in placeholders):
        return template % {'INPUT_ROOT': expansion, 'INPUT_DIRNAME': dirname}
    return template
def _InstallableTargetInstallPath(self):
    """Returns the location of the final output for an installable target."""
    # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
    # rely on this. Emulate this behavior for mac.
    # XXX(TooTallNate): disabling this code since we don't want this behavior...
    #if (self.type == 'shared_library' and
    #    (self.flavor != 'mac' or self.toolset != 'target')):
    #  # Install all shared libs into a common directory (per toolset) for
    #  # convenient access with LD_LIBRARY_PATH.
    #  return '$(builddir)/lib.%s/%s' % (self.toolset, self.alias)
    # Everything lands directly in the build directory under its alias.
    return '$(builddir)/' + self.alias
def WriteAutoRegenerationRule(params, root_makefile, makefile_name,
                              build_files):
    """Write the target to regenerate the Makefile.

    Emits a make rule that reruns gyp, with its original command-line
    flags, whenever any of the input .gyp/.gypi files change.
    """
    options = params['options']
    build_files_args = [gyp.common.RelativePath(filename, options.toplevel_dir)
                        for filename in params['build_files_arg']]
    # The gyp binary must be invokable from the makefile's own directory.
    gyp_binary = gyp.common.FixIfRelativePath(params['gyp_binary'],
                                              options.toplevel_dir)
    if not gyp_binary.startswith(os.sep):
        gyp_binary = os.path.join('.', gyp_binary)
    root_makefile.write(
        "quiet_cmd_regen_makefile = ACTION Regenerating $@\n"
        "cmd_regen_makefile = cd $(srcdir); %(cmd)s\n"
        "%(makefile_name)s: %(deps)s\n"
        "\t$(call do_cmd,regen_makefile)\n\n" % {
            'makefile_name': makefile_name,
            'deps': ' '.join(map(Sourceify, build_files)),
            'cmd': gyp.common.EncodePOSIXShellList(
                [gyp_binary, '-fmake'] +
                gyp.RegenerateFlags(options) +
                build_files_args)})
def PerformBuild(data, configurations, params):
    """Invoke 'make' once per requested configuration.

    Runs make in options.toplevel_dir (via -C) when it is not the current
    directory, passing BUILDTYPE=<config>. Raises CalledProcessError on a
    failed build (propagated from subprocess.check_call).
    """
    options = params['options']
    for config in configurations:
        arguments = ['make']
        if options.toplevel_dir and options.toplevel_dir != '.':
            arguments += ['-C', options.toplevel_dir]
        arguments.append('BUILDTYPE=' + config)
        # Parenthesized print: the bare Python-2 print statement made this
        # file a SyntaxError under Python 3; this form works on both.
        print('Building [%s]: %s' % (config, arguments))
        subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
    """Generator entry point: write the root Makefile, one .mk per target,
    and (optionally) per-.gyp convenience Makefiles plus the
    auto-regeneration rule."""
    options = params['options']
    flavor = gyp.common.GetFlavor(params)
    generator_flags = params.get('generator_flags', {})
    builddir_name = generator_flags.get('output_dir', 'out')
    android_ndk_version = generator_flags.get('android_ndk_version', None)
    default_target = generator_flags.get('default_target', 'all')

    def CalculateMakefilePath(build_file, base_name):
        """Determine where to write a Makefile for a given gyp file."""
        # Paths in gyp files are relative to the .gyp file, but we want
        # paths relative to the source root for the master makefile. Grab
        # the path of the .gyp file as the base to relativize against.
        # E.g. "foo/bar" when we're constructing targets for "foo/bar/baz.gyp".
        base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                            options.depth)
        # We write the file in the base_path directory.
        output_file = os.path.join(options.depth, base_path, base_name)
        if options.generator_output:
            output_file = os.path.join(
                options.depth, options.generator_output, base_path, base_name)
        base_path = gyp.common.RelativePath(os.path.dirname(build_file),
                                            options.toplevel_dir)
        return base_path, output_file

    # TODO: search for the first non-'Default' target. This can go
    # away when we add verification that all targets have the
    # necessary configurations.
    default_configuration = None
    toolsets = set([target_dicts[target]['toolset'] for target in target_list])
    for target in target_list:
        spec = target_dicts[target]
        if spec['default_configuration'] != 'Default':
            default_configuration = spec['default_configuration']
            break
    if not default_configuration:
        default_configuration = 'Default'

    srcdir = '.'
    makefile_name = 'Makefile' + options.suffix
    makefile_path = os.path.join(options.toplevel_dir, makefile_name)
    if options.generator_output:
        global srcdir_prefix
        makefile_path = os.path.join(
            options.toplevel_dir, options.generator_output, makefile_name)
        srcdir = gyp.common.RelativePath(srcdir, options.generator_output)
        srcdir_prefix = '$(srcdir)/'

    # Baseline (linux) header parameters; flavors override below.
    flock_command= 'flock'
    copy_archive_arguments = '-af'
    header_params = {
        'default_target': default_target,
        'builddir': builddir_name,
        'default_configuration': default_configuration,
        'flock': flock_command,
        'flock_index': 1,
        'link_commands': LINK_COMMANDS_LINUX,
        'extra_commands': '',
        'srcdir': srcdir,
        'copy_archive_args': copy_archive_arguments,
    }
    if flavor == 'mac':
        flock_command = './gyp-mac-tool flock'
        header_params.update({
            'flock': flock_command,
            'flock_index': 2,
            'link_commands': LINK_COMMANDS_MAC,
            'extra_commands': SHARED_HEADER_MAC_COMMANDS,
        })
    elif flavor == 'android':
        header_params.update({
            'link_commands': LINK_COMMANDS_ANDROID,
        })
    elif flavor == 'solaris':
        header_params.update({
            'flock': './gyp-flock-tool flock',
            'flock_index': 2,
        })
    elif flavor == 'freebsd':
        # Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
        header_params.update({
            'flock': 'lockf',
        })
    elif flavor == 'openbsd':
        copy_archive_arguments = '-pPRf'
        header_params.update({
            'copy_archive_args': copy_archive_arguments,
        })
    elif flavor == 'aix':
        copy_archive_arguments = '-pPRf'
        header_params.update({
            'copy_archive_args': copy_archive_arguments,
            'link_commands': LINK_COMMANDS_AIX,
            'flock': './gyp-flock-tool flock',
            'flock_index': 2,
        })

    # Tool defaults come from the environment, falling back to make variables.
    header_params.update({
        'CC.target': GetEnvironFallback(('CC_target', 'CC'), '$(CC)'),
        'AR.target': GetEnvironFallback(('AR_target', 'AR'), '$(AR)'),
        'CXX.target': GetEnvironFallback(('CXX_target', 'CXX'), '$(CXX)'),
        'LINK.target': GetEnvironFallback(('LINK_target', 'LINK'), '$(LINK)'),
        'CC.host': GetEnvironFallback(('CC_host',), 'gcc'),
        'AR.host': GetEnvironFallback(('AR_host',), 'ar'),
        'CXX.host': GetEnvironFallback(('CXX_host',), 'g++'),
        'LINK.host': GetEnvironFallback(('LINK_host',), '$(CXX.host)'),
    })

    # make_global_settings comes from the first target's build file; it must
    # be identical across all build files (asserted per-target below).
    build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
    make_global_settings_array = data[build_file].get('make_global_settings', [])
    wrappers = {}
    for key, value in make_global_settings_array:
        if key.endswith('_wrapper'):
            wrappers[key[:-len('_wrapper')]] = '$(abspath %s)' % value
    make_global_settings = ''
    for key, value in make_global_settings_array:
        if re.match('.*_wrapper', key):
            continue
        if value[0] != '$':
            value = '$(abspath %s)' % value
        wrapper = wrappers.get(key)
        if wrapper:
            value = '%s %s' % (wrapper, value)
            del wrappers[key]
        if key in ('CC', 'CC.host', 'CXX', 'CXX.host'):
            make_global_settings += (
                'ifneq (,$(filter $(origin %s), undefined default))\n' % key)
            # Let gyp-time envvars win over global settings.
            env_key = key.replace('.', '_')  # CC.host -> CC_host
            if env_key in os.environ:
                value = os.environ[env_key]
            make_global_settings += '  %s = %s\n' % (key, value)
            make_global_settings += 'endif\n'
        else:
            make_global_settings += '%s ?= %s\n' % (key, value)
    # TODO(ukai): define cmd when only wrapper is specified in
    # make_global_settings.
    header_params['make_global_settings'] = make_global_settings

    gyp.common.EnsureDirExists(makefile_path)
    root_makefile = open(makefile_path, 'w')
    root_makefile.write(SHARED_HEADER % header_params)
    # Currently any versions have the same effect, but in future the behavior
    # could be different.
    if android_ndk_version:
        root_makefile.write(
            '# Define LOCAL_PATH for build of Android applications.\n'
            'LOCAL_PATH := $(call my-dir)\n'
            '\n')
    for toolset in toolsets:
        root_makefile.write('TOOLSET := %s\n' % toolset)
        WriteRootHeaderSuffixRules(root_makefile)

    # Put build-time support tools next to the root Makefile.
    dest_path = os.path.dirname(makefile_path)
    gyp.common.CopyTool(flavor, dest_path)

    # Find the list of targets that derive from the gyp file(s) being built.
    needed_targets = set()
    for build_file in params['build_files']:
        for target in gyp.common.AllTargets(target_list, target_dicts, build_file):
            needed_targets.add(target)

    build_files = set()
    include_list = set()
    for qualified_target in target_list:
        build_file, target, toolset = gyp.common.ParseQualifiedTarget(
            qualified_target)

        this_make_global_settings = data[build_file].get('make_global_settings', [])
        assert make_global_settings_array == this_make_global_settings, (
            "make_global_settings needs to be the same for all targets. %s vs. %s" %
            (this_make_global_settings, make_global_settings))

        build_files.add(gyp.common.RelativePath(build_file, options.toplevel_dir))
        included_files = data[build_file]['included_files']
        for included_file in included_files:
            # The included_files entries are relative to the dir of the build file
            # that included them, so we have to undo that and then make them relative
            # to the root dir.
            relative_include_file = gyp.common.RelativePath(
                gyp.common.UnrelativePath(included_file, build_file),
                options.toplevel_dir)
            abs_include_file = os.path.abspath(relative_include_file)
            # If the include file is from the ~/.gyp dir, we should use absolute path
            # so that relocating the src dir doesn't break the path.
            if (params['home_dot_gyp'] and
                abs_include_file.startswith(params['home_dot_gyp'])):
                build_files.add(abs_include_file)
            else:
                build_files.add(relative_include_file)

        base_path, output_file = CalculateMakefilePath(build_file,
            target + '.' + toolset + options.suffix + '.mk')

        spec = target_dicts[qualified_target]
        configs = spec['configurations']

        if flavor == 'mac':
            gyp.xcode_emulation.MergeGlobalXcodeSettingsToSpec(data[build_file], spec)

        writer = MakefileWriter(generator_flags, flavor)
        writer.Write(qualified_target, base_path, output_file, spec, configs,
                     part_of_all=qualified_target in needed_targets)

        # Our root_makefile lives at the source root. Compute the relative path
        # from there to the output_file for including.
        mkfile_rel_path = gyp.common.RelativePath(output_file,
                                                  os.path.dirname(makefile_path))
        include_list.add(mkfile_rel_path)

    # Write out per-gyp (sub-project) Makefiles.
    depth_rel_path = gyp.common.RelativePath(options.depth, os.getcwd())
    for build_file in build_files:
        # The paths in build_files were relativized above, so undo that before
        # testing against the non-relativized items in target_list and before
        # calculating the Makefile path.
        build_file = os.path.join(depth_rel_path, build_file)
        gyp_targets = [target_dicts[target]['target_name'] for target in target_list
                       if target.startswith(build_file) and
                       target in needed_targets]
        # Only generate Makefiles for gyp files with targets.
        if not gyp_targets:
            continue
        base_path, output_file = CalculateMakefilePath(build_file,
            os.path.splitext(os.path.basename(build_file))[0] + '.Makefile')
        makefile_rel_path = gyp.common.RelativePath(os.path.dirname(makefile_path),
                                                    os.path.dirname(output_file))
        writer.WriteSubMake(output_file, makefile_rel_path, gyp_targets,
                            builddir_name)

    # Write out the sorted list of includes.
    root_makefile.write('\n')
    for include_file in sorted(include_list):
        # We wrap each .mk include in an if statement so users can tell make to
        # not load a file by setting NO_LOAD. The below make code says, only
        # load the .mk file if the .mk filename doesn't start with a token in
        # NO_LOAD.
        root_makefile.write(
            "ifeq ($(strip $(foreach prefix,$(NO_LOAD),\\\n"
            "    $(findstring $(join ^,$(prefix)),\\\n"
            "                 $(join ^," + include_file + ")))),)\n")
        root_makefile.write("  include " + include_file + "\n")
        root_makefile.write("endif\n")
    root_makefile.write('\n')

    if (not generator_flags.get('standalone')
        and generator_flags.get('auto_regeneration', True)):
        WriteAutoRegenerationRule(params, root_makefile, makefile_name, build_files)

    root_makefile.write(SHARED_FOOTER)

    root_makefile.close()
| gpl-2.0 |
ericlink/adms-server | playframework-dist/1.1-src/python/Lib/encodings/utf_8.py | 103 | 1047 | """ Python 'utf-8' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
encode = codecs.utf_8_encode
def decode(input, errors='strict'):
    """Decode *input* as UTF-8.

    The 'final' flag passed to the C decoder is True, so the whole buffer
    is consumed; returns a (decoded_string, bytes_consumed) pair.
    """
    output, consumed = codecs.utf_8_decode(input, errors, True)
    return (output, consumed)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental UTF-8 encoder; encoding is stateless, so *final* is unused."""
    def encode(self, input, final=False):
        # UTF-8 encoding never needs to buffer state between calls.
        return codecs.utf_8_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.BufferedIncrementalDecoder):
    """Incremental UTF-8 decoder; the buffered base class holds any
    trailing partial multi-byte sequence between calls."""
    _buffer_decode = codecs.utf_8_decode
class StreamWriter(codecs.StreamWriter):
    """Stream writer encoding text to UTF-8."""
    encode = codecs.utf_8_encode
class StreamReader(codecs.StreamReader):
    """Stream reader decoding UTF-8 input."""
    decode = codecs.utf_8_decode
### encodings module API
def getregentry():
    """Return the CodecInfo record used to register this codec."""
    return codecs.CodecInfo(
        name='utf-8',
        encode=encode,
        decode=decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| mit |
hoatle/odoo | addons/account_payment/__openerp__.py | 261 | 2925 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Suppliers Payment Management',
'version': '1.1',
'author': 'OpenERP SA',
'category': 'Accounting & Finance',
'description': """
Module to manage the payment of your supplier invoices.
=======================================================
This module allows you to create and manage your payment orders, with purposes to
---------------------------------------------------------------------------------
* serve as base for an easy plug-in of various automated payment mechanisms.
* provide a more efficient way to manage invoice payment.
Warning:
~~~~~~~~
The confirmation of a payment order does _not_ create accounting entries, it just
records the fact that you gave your payment order to your bank. The booking of
your order must be encoded as usual through a bank statement. Indeed, it's only
when you get the confirmation from your bank that your order has been accepted
that you can book it in your accounting. To help you with that operation, you
have a new option to import payment orders as bank statement lines.
""",
'depends': ['account','account_voucher'],
'data': [
'security/account_payment_security.xml',
'security/ir.model.access.csv',
'wizard/account_payment_pay_view.xml',
'wizard/account_payment_populate_statement_view.xml',
'wizard/account_payment_create_order_view.xml',
'account_payment_view.xml',
'account_payment_workflow.xml',
'account_payment_sequence.xml',
'account_payment_report.xml',
'views/report_paymentorder.xml',
],
'demo': ['account_payment_demo.xml'],
'test': [
'test/account_payment_demo.yml',
'test/cancel_payment_order.yml',
'test/payment_order_process.yml',
'test/account_payment_report.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mikecroucher/GPy | GPy/models/warped_gp.py | 6 | 7238 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
import numpy as np
#from ..util.warping_functions import *
from ..core import GP
from .. import likelihoods
from paramz import ObsAr
#from GPy.util.warping_functions import TanhFunction
from ..util.warping_functions import TanhFunction
from GPy import kern
class WarpedGP(GP):
    """GP regression model whose outputs are passed through a warping function.

    The observed targets Y are mapped through a monotonic warping function f
    (by default a tanh-based one) and a standard GP is fit to f(Y);
    predictions are mapped back through the inverse warping.

    :param X: input observations
    :param Y: observed targets (copied internally)
    :param kernel: a GPy kernel; defaults to an RBF over all input dims
    :param warping_function: a GPy warping function; defaults to TanhFunction
    :param warping_terms: number of terms for the default tanh warping
    :param normalizer: whether to normalize Y before fitting
    """

    def __init__(self, X, Y, kernel=None, warping_function=None, warping_terms=3, normalizer=False):
        if kernel is None:
            kernel = kern.RBF(X.shape[1])
        # 'is None' rather than '== None' (PEP 8 / E711): identity test,
        # and avoids invoking any __eq__ a custom warping object defines.
        if warping_function is None:
            self.warping_function = TanhFunction(warping_terms)
            self.warping_params = (np.random.randn(self.warping_function.n_terms * 3 + 1) * 1)
        else:
            self.warping_function = warping_function
        likelihood = likelihoods.Gaussian()
        super(WarpedGP, self).__init__(X, Y.copy(), likelihood=likelihood, kernel=kernel, normalizer=normalizer)
        # Keep a private copy of the untransformed targets: Y_normalized is
        # overwritten with the warped values on every parameter update.
        self.Y_normalized = self.Y_normalized.copy()
        self.Y_untransformed = self.Y_normalized.copy()
        self.predict_in_warped_space = True
        self.link_parameter(self.warping_function)

    def set_XY(self, X=None, Y=None):
        """Replace the training data and refresh the cached untransformed Y."""
        super(WarpedGP, self).set_XY(X, Y)
        self.Y_untransformed = self.Y_normalized.copy()
        self.update_model(True)

    def parameters_changed(self):
        """
        Notice that we update the warping function gradients here.
        """
        self.Y_normalized[:] = self.transform_data()
        super(WarpedGP, self).parameters_changed()
        Kiy = self.posterior.woodbury_vector.flatten()
        self.warping_function.update_grads(self.Y_untransformed, Kiy)

    def transform_data(self):
        """Return the warped targets f(Y_untransformed)."""
        Y = self.warping_function.f(self.Y_untransformed.copy()).copy()
        return Y

    def log_likelihood(self):
        """
        Notice we add the jacobian of the warping function here.
        """
        ll = GP.log_likelihood(self)
        jacobian = self.warping_function.fgrad_y(self.Y_untransformed)
        return ll + np.log(jacobian).sum()

    def plot_warping(self):
        """Plot the learned warping function over the observed target range."""
        self.warping_function.plot(self.Y_untransformed.min(), self.Y_untransformed.max())

    def _get_warped_term(self, mean, std, gh_samples, pred_init=None):
        # Inverse-warp the Gauss-Hermite nodes, shifted and scaled by the
        # predictive mean/std (change of variables for the quadrature).
        arg1 = gh_samples.dot(std.T) * np.sqrt(2)
        arg2 = np.ones(shape=gh_samples.shape).dot(mean.T)
        return self.warping_function.f_inv(arg1 + arg2, y=pred_init)

    def _get_warped_mean(self, mean, std, pred_init=None, deg_gauss_hermite=20):
        """
        Calculate the warped mean by using Gauss-Hermite quadrature.
        """
        gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite)
        gh_samples = gh_samples[:, None]
        gh_weights = gh_weights[None, :]
        return gh_weights.dot(self._get_warped_term(mean, std, gh_samples)) / np.sqrt(np.pi)

    def _get_warped_variance(self, mean, std, pred_init=None, deg_gauss_hermite=20):
        """
        Calculate the warped variance by using Gauss-Hermite quadrature.
        """
        gh_samples, gh_weights = np.polynomial.hermite.hermgauss(deg_gauss_hermite)
        gh_samples = gh_samples[:, None]
        gh_weights = gh_weights[None, :]
        # E[g^-1(F)^2] - E[g^-1(F)]^2 under the Gaussian predictive density.
        arg1 = gh_weights.dot(self._get_warped_term(mean, std, gh_samples,
                                                    pred_init=pred_init) ** 2) / np.sqrt(np.pi)
        arg2 = self._get_warped_mean(mean, std, pred_init=pred_init,
                                     deg_gauss_hermite=deg_gauss_hermite)
        return arg1 - (arg2 ** 2)

    def predict(self, Xnew, kern=None, pred_init=None, Y_metadata=None,
                median=False, deg_gauss_hermite=20, likelihood=None):
        """
        Prediction results depend on:
            - The value of the self.predict_in_warped_space flag
            - The median flag passed as argument
        The likelihood keyword is never used, it is just to follow the plotting API.
        """
        mean, var = super(WarpedGP, self).predict(Xnew, kern=kern, full_cov=False, likelihood=likelihood)
        if self.predict_in_warped_space:
            std = np.sqrt(var)
            if median:
                # The median maps straight through the monotonic inverse.
                wmean = self.warping_function.f_inv(mean, y=pred_init)
            else:
                wmean = self._get_warped_mean(mean, std, pred_init=pred_init,
                                              deg_gauss_hermite=deg_gauss_hermite).T
            wvar = self._get_warped_variance(mean, std, pred_init=pred_init,
                                             deg_gauss_hermite=deg_gauss_hermite).T
        else:
            wmean = mean
            wvar = var
        return wmean, wvar

    def predict_quantiles(self, X, quantiles=(2.5, 97.5), Y_metadata=None, likelihood=None, kern=None):
        """
        Get the predictive quantiles around the prediction at X

        :param X: The points at which to make a prediction
        :type X: np.ndarray (Xnew x self.input_dim)
        :param quantiles: tuple of quantiles, default is (2.5, 97.5) which is the 95% interval
        :type quantiles: tuple
        :returns: list of quantiles for each X and predictive quantiles for interval combination
        :rtype: [np.ndarray (Xnew x self.input_dim), np.ndarray (Xnew x self.input_dim)]
        """
        qs = super(WarpedGP, self).predict_quantiles(X, quantiles, Y_metadata=Y_metadata, likelihood=likelihood, kern=kern)
        if self.predict_in_warped_space:
            # Quantiles are preserved under the monotonic inverse warping.
            return [self.warping_function.f_inv(q) for q in qs]
        return qs

    def log_predictive_density(self, x_test, y_test, Y_metadata=None):
        """
        Calculation of the log predictive density. Notice we add
        the jacobian of the warping function here.

        .. math:
            p(y_{*}|D) = p(y_{*}|f_{*})p(f_{*}|\mu_{*}\\sigma^{2}_{*})

        :param x_test: test locations (x_{*})
        :type x_test: (Nx1) array
        :param y_test: test observations (y_{*})
        :type y_test: (Nx1) array
        :param Y_metadata: metadata associated with the test points
        """
        mu_star, var_star = self._raw_predict(x_test)
        fy = self.warping_function.f(y_test)
        ll_lpd = self.likelihood.log_predictive_density(fy, mu_star, var_star, Y_metadata=Y_metadata)
        return ll_lpd + np.log(self.warping_function.fgrad_y(y_test))
if __name__ == '__main__':
    # Quick smoke test: build a warped GP on noisy sine data.
    X = np.random.randn(100, 1)
    Y = np.sin(X) + np.random.randn(100, 1)*0.05
    m = WarpedGP(X, Y)
RickHutten/paparazzi | sw/ground_segment/python/real_time_plot/plotpanel.py | 25 | 14277 | from __future__ import absolute_import, print_function, division
import wx
from ivy.std_api import *
from ivy.ivy import IvyIllegalStateError
import logging
from textdroptarget import *
import math
import random
import sys
from os import getenv, path
import messagepicker
# if PAPARAZZI_SRC not set, then assume the tree containing this
# file is a reasonable substitute
PPRZ_SRC = getenv("PAPARAZZI_SRC", path.normpath(path.join(path.dirname(path.abspath(__file__)), '../../../../')))
sys.path.append(PPRZ_SRC + "/sw/lib/python")
sys.path.append(PPRZ_SRC + "/sw/ext/pprzlink/lib/v1.0/python")
import pprz_env
from pprzlink import messages_xml_map
from pprzlink.ivy import IvyMessagesInterface
from pprzlink.message import PprzMessage
class PlotData:
    """Ring buffer of samples for one telemetry curve, plus its draw styling.

    Points live in a fixed-size circular buffer (``data``); empty slots are
    None. ``index`` marks both where the next sample is written and where
    drawing starts, so the display scrolls as new samples arrive.
    """

    def __init__(self, ivy_msg_id, title, width, color=None, scale=1.0):
        self.id = ivy_msg_id  # ivy bind id, used for reverse lookup by the panel
        self.title = title
        self.SetPlotSize(width)
        self.x_min = 1e32
        # Fix: was 1e-32 (a tiny positive number) instead of the intended
        # "minus infinity" sentinel used by GetXMinMax; the stale wrong value
        # was visible on the first frame drawn before any data arrived.
        self.x_max = -1e32
        self.avg = 0.0
        self.std_dev = 0.0
        self.real_time = False
        self.scale = scale
        self.offset = 0.0
        if color is not None:
            self.color = color
        else:
            # No colour supplied: pick a random one.
            r, g, b = random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
            self.color = wx.Colour(r, g, b)

    def SetRealTime(self, value):
        """When True the buffer advances on every sample (scrolling plot)."""
        self.real_time = value

    def SetOffset(self, value):
        """Additive offset applied to samples before scaling when drawing."""
        self.offset = value

    def SetScale(self, value):
        """Multiplicative factor applied to samples when drawing."""
        self.scale = value

    def SetPlotSize(self, size):
        """(Re)allocate the ring buffer to hold *size* samples (all empty)."""
        self.size = size
        self.index = size - 1  # holds the index of the next point to add and the first point to draw
        self.data = []  # holds the list of points to plot
        for i in range(size):
            self.data.append(None)
        self.avg = 0.0
        self.std_dev = 0.0

    def AddPoint(self, point, x_axis):
        """Store a sample; advance the ring in scrolling mode or when another
        curve provides the x axis."""
        self.data[self.index] = point
        if self.real_time or (x_axis is not None):
            self.index = (self.index + 1) % self.size  # increment index to next point
            self.data[self.index] = None

    def DrawTitle(self, dc, margin, width, height):
        """Draw the legend entry (colour swatch + stats) and return its height."""
        text = 'avg:%.2f std:%.2f %s' % (self.avg, self.std_dev, self.title)
        (w, h) = dc.GetTextExtent(text)
        dc.SetBrush(wx.Brush(self.color))
        dc.DrawRectangle(width - h - margin, height, h, h)
        dc.DrawText(text, width - 2 * margin - w - h, height)
        return h

    def DrawCurve(self, dc, width, height, margin, _max_, _min_, x_axis):
        """Render the curve into *dc*, refreshing avg/std as a side effect.

        *_max_*/*_min_* give the shared y range; *x_axis* is an optional
        PlotData supplying x values (otherwise samples are spread evenly
        across the width).
        """
        if width != self.size:
            # Widget was resized: reallocate the buffer (drops old data).
            self.SetPlotSize(width)
            return
        if (not self.real_time) and (x_axis is None):
            self.index = (self.index + 1) % self.size  # increment index to next point
            self.data[self.index] = None
        if x_axis is not None:
            (x_min, x_max) = x_axis.GetXMinMax()
        dc.SetPen(wx.Pen(self.color, 1))
        if _max_ < _min_:
            (_min_, _max_) = (-1, 1)  # prevent divide by zero or inversion
        if _max_ == _min_:
            (_min_, _max_) = (_max_ - 0.5, _max_ + 0.5)
        delta = _max_ - _min_
        dy = (height - margin * 2) / delta
        n = 0
        sums = 0.0
        sum_squares = 0.0
        lines = []
        point_1 = None
        for i in range(self.size):
            ix = (i + self.index) % self.size
            point = self.data[ix]
            if point is None:
                continue  # empty ring-buffer slot
            n += 1
            sums = sums + point
            sum_squares = sum_squares + (point * point)
            if x_axis is not None:
                x = x_axis.data[ix]
                if x is None:
                    continue
                dx = (width - 1) / (x_max - x_min)
                x = int((x - x_min) * dx)
            else:
                x = i * width / self.size
            scaled_point = (point + self.offset) * self.scale
            y = height - margin - int((scaled_point - _min_) * dy)
            if point_1 is not None:
                line = (point_1[0], point_1[1], x, y)
                lines.append(line)
            point_1 = (x, y)
        dc.DrawLineList(lines)
        if n > 0:
            self.avg = sums / n
            self.std_dev = math.sqrt(math.fabs((sum_squares / n) - (self.avg * self.avg)))

    def GetXMinMax(self):
        """Return (min, max) over stored samples, padding degenerate ranges,
        and cache them on self.x_min / self.x_max."""
        x_min = 1e32
        x_max = -1e32
        for i in range(self.size):
            point = self.data[i]
            if point is None:
                continue
            x_min = min(x_min, point)
            x_max = max(x_max, point)
        if x_max < x_min:
            (x_min, x_max) = (-1, 1)  # prevent divide by zero or inversion
        if x_max == x_min:
            (x_min, x_max) = (x_max - 0.5, x_max + 0.5)
        self.x_max = x_max
        self.x_min = x_min
        return (x_min, x_max)
_IVY_APPNAME = 'RealtimePlot'
_IVY_STRING = '(%s %s .*$)'
# _IVY_STRING = '^([^ ]*) +(%s( .*|$))' ## <-- from original ocaml (doesn't work here, just returns Sender field...)
def create(parent, frame):
    """Factory entry point used by the plotter shell to build the panel."""
    return PlotPanel(parent, frame)
class PlotPanel(object):
def __init__(self, parent, frame):
    """Create the plot surface drawing onto *parent*, reporting to *frame*."""
    self.parent = parent  # we are drawing on our parent, so dc comes from this
    self.frame = frame  # the frame owns any controls we might need to update
    parent.SetDropTarget(TextDropTarget(self))  # calls self.OnDropText when drag and drop complete
    self.width = 800
    self.height = 200
    self.margin = min(self.height / 10, 20)
    self.font = wx.Font(self.margin / 2, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
    self.pixmap = wx.EmptyBitmap(self.width, self.height)  # off-screen buffer
    self.plot_size = self.width
    self.max = -1e32  # shared y range accumulated from incoming samples
    self.min = 1e32
    self.plot_interval = 200  # redraw period in milliseconds
    self.plots = {}  # plots[ac_id][message][field] -> PlotData
    self.auto_scale = True
    self.offset = 0.0
    self.scale = 1.0
    self.x_axis = None  # optional PlotData used as x axis for all curves
    messages_xml_map.parse_messages()
    self.ivy_interface = IvyMessagesInterface(_IVY_APPNAME)
    # start the timer
    self.timer = wx.FutureCall(self.plot_interval, self.OnTimer)
def SetPlotInterval(self, value):
    """Change the redraw period (milliseconds) and reschedule the timer."""
    self.plot_interval = value
    # NOTE(review): Restart() already re-arms the existing timer; creating a
    # second FutureCall right after looks redundant -- confirm intent.
    self.timer.Restart(self.plot_interval)
    self.timer = wx.FutureCall(self.plot_interval, self.OnTimer)
def SetAutoScale(self, value):
    """Enable/disable automatic growth of the shared y range."""
    self.auto_scale = value
def SetMin(self, value):
    """Manually set the lower bound of the y axis."""
    self.min = value
def SetMax(self, value):
    """Manually set the upper bound of the y axis."""
    self.max = value
def Pause(self, pause):
    """Stop the redraw timer when *pause* is True, restart it otherwise."""
    if pause:
        self.timer.Stop()
    else:
        self.timer = wx.FutureCall(self.plot_interval, self.OnTimer)
def ResetScale(self):
    """Forget the accumulated y range; it will re-grow from incoming data."""
    self.max = -1e32
    self.min = 1e32
def OnClose(self):
    """Shut down: stop the redraw timer and leave the ivy bus."""
    self.timer.Stop()
    try:
        IvyStop()
    except IvyIllegalStateError as e:
        # Ivy may already be stopped; report but don't fail the close.
        print(e)
def OnErase(self, event):
    """Swallow ERASE_BACKGROUND events; we repaint fully, avoiding flicker."""
    pass
def ShowMessagePicker(self, parent):
    """Open the message-picker dialog; picked fields call back into BindCurve."""
    frame = messagepicker.MessagePicker(parent, self.BindCurve, self.ivy_interface)
    frame.Show()
def OnDropText(self, data):
    """Drag-and-drop handler; payload is 'ac_id:category:message:field:scale'."""
    # NOTE(review): .encode('ASCII').split(':') is a Python-2 idiom; under
    # Python 3 splitting bytes with a str separator raises -- confirm runtime.
    [ac_id, category, message, field, scale] = data.encode('ASCII').split(':')
    self.BindCurve(int(ac_id), message, field, scale=float(scale))
def OnIvyMsg(self, agent, *larg):
    """Ivy bus callback: parse a telemetry line and feed any bound curves."""
    # print(larg[0])
    # larg[0] is the raw message: '<ac_id> <msg_name> <field values...>'.
    data = larg[0].split(' ')
    ac_id = int(data[0])
    message = data[1]
    if ac_id not in self.plots:
        return
    if message not in self.plots[ac_id]:
        return
    for field in self.plots[ac_id][message]:
        plot = self.plots[ac_id][message][field]
        # Field position within the telemetry message definition gives the
        # value index; +2 skips the ac_id and message-name tokens.
        ix = messages_xml_map.message_dictionary["telemetry"][message].index(field)
        point = float(data[ix + 2])
        if self.x_axis is None or self.x_axis.id != plot.id:
            # The x-axis curve itself does not contribute to the y range.
            if self.auto_scale:
                scaled_point = (point + plot.offset) * plot.scale
                self.max = max(self.max, scaled_point)
                self.min = min(self.min, scaled_point)
        if self.x_axis is not None:
            # Keep this curve's write position in lockstep with the x axis.
            plot.index = self.x_axis.index
        plot.AddPoint(point, self.x_axis)
def BindCurve(self, ac_id, message, field, color=None, use_as_x=False, scale=1.0):
    """Subscribe to <ac_id>:<message> on the ivy bus and plot <field>.

    If the curve is already bound, only its colour is re-randomized.
    When *use_as_x* is set the new curve becomes the x axis for the others.
    """
    # -- add this telemetry to our list of things to plot ...
    message_string = _IVY_STRING % (ac_id, message)
    # print('Binding to %s' % message_string)
    if ac_id not in self.plots:
        self.plots[ac_id] = {}
    if message not in self.plots[ac_id]:
        self.plots[ac_id][message] = {}
    if field in self.plots[ac_id][message]:
        # Fix: wx.Color is the deprecated spelling (removed in wxPython
        # Phoenix); use wx.Colour, as the rest of this file does.
        self.plots[ac_id][message][field].color = wx.Colour(
            random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        return
    ivy_id = self.ivy_interface.bind_raw(self.OnIvyMsg, str(message_string))
    title = '%i:%s:%s' % (ac_id, message, field)
    self.plots[ac_id][message][field] = PlotData(ivy_id, title, self.plot_size, color, scale)
    self.frame.AddCurve(ivy_id, title, use_as_x)
    if use_as_x:
        self.x_axis = self.plots[ac_id][message][field]
def CalcMinMax(self, plot):
    """Fold *plot*'s samples into the shared y range and push it to the frame.

    No-op when auto-scaling is off. Empty slots in the ring buffer are
    stored as None and must be skipped (comparing None with a float raises
    under Python 3; DrawCurve/GetXMinMax already skip them the same way).
    """
    if not self.auto_scale: return
    for x in plot.data:
        if x is None:
            continue  # empty ring-buffer slot
        self.max = max(self.max, x)
        self.min = min(self.min, x)
    self.frame.SetMinMax(self.min, self.max)
def FindPlotName(self, ivy_id):
    """Reverse-lookup the (ac_id, message, field) triple whose PlotData
    carries the ivy binding id *ivy_id*.

    Returns (None, None, None) when no curve matches.
    """
    for ac_id, messages in self.plots.items():
        for msg, fields in messages.items():
            for field, plot in fields.items():
                if plot.id == ivy_id:
                    return (ac_id, msg, field)
    return (None, None, None)
def FindPlot(self, ivy_id):
    """Return the PlotData bound to *ivy_id*, or None if unknown."""
    ac_id, msg, field = self.FindPlotName(ivy_id)
    return None if ac_id is None else self.plots[ac_id][msg][field]
def RemovePlot(self, ivy_id):
    """Unbind *ivy_id* from the ivy bus and drop its curve."""
    (ac_id, msg, field) = self.FindPlotName(ivy_id)
    if ac_id is None:
        return  # unknown id: nothing to remove
    if (self.x_axis is not None) and (self.x_axis.id == ivy_id):
        self.x_axis = None  # the removed curve was serving as the x axis
    self.ivy_interface.unbind(ivy_id)
    del self.plots[ac_id][msg][field]
    if len(self.plots[ac_id][msg]) == 0:
        del self.plots[ac_id][msg]
def OffsetPlot(self, ivy_id, offset):
    """Apply a constant additive *offset* to the curve bound to *ivy_id*.

    Unknown ids are ignored. The shared y range is then recomputed so the
    shifted curve stays in view.
    """
    plot = self.FindPlot(ivy_id)
    if plot is None:
        return
    plot.SetOffset(offset)
    # Fixes two NameErrors: the old debug print referenced an undefined
    # 'value', and CalcMinMax was called as a bare name instead of a method.
    self.CalcMinMax(plot)
def ScalePlot(self, ivy_id, offset):
    """Set the multiplicative scale factor of the curve bound to *ivy_id*.

    The parameter keeps its historical (misleading) name 'offset' to
    preserve the call signature; the value goes to PlotData.SetScale.
    """
    plot = self.FindPlot(ivy_id)
    if plot is None:
        return
    plot.SetScale(offset)
    # Fix: CalcMinMax is a method; the bare name raised NameError here.
    self.CalcMinMax(plot)
def SetRealTime(self, ivy_id, value):
    """Toggle scrolling ('real time') mode on the curve bound to *ivy_id*."""
    plot = self.FindPlot(ivy_id)
    if plot is None:
        return
    plot.SetRealTime(value)
def SetXAxis(self, ivy_id):
plot = self.FindPlot(ivy_id)
if plot is None:
return
self.x_axis = plot
    def ClearXAxis(self):
        """Revert to the default time-based X axis."""
        self.x_axis = None
    def OnSize(self, size):
        """Handle a resize: reallocate the back buffer and recompute the
        layout metrics (samples per curve, margin, label font)."""
        (width, height) = size
        if self.width == width and self.height == height:
            return
        self.pixmap = wx.EmptyBitmap(width, height)
        self.width = width
        self.height = height
        # One data sample per horizontal pixel.
        self.plot_size = width
        self.margin = min(self.height / 10, 20)
        self.font = wx.Font(self.margin / 2, wx.DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL)
    def OnTimer(self):
        """Periodic redraw: re-arm the timer, publish the current scale
        bounds to the frame and repaint."""
        self.timer.Restart(self.plot_interval)
        self.frame.SetMinMax(self.min, self.max)
        self.DrawFrame()
    def DrawFrame(self):
        """Repaint the whole plot into the double buffer: background grid,
        then every curve with its title stacked down the left edge."""
        dc = wx.ClientDC(self.parent)
        bdc = wx.BufferedDC(dc, self.pixmap)
        bdc.SetBackground(wx.Brush("White"))
        bdc.Clear()
        self.DrawBackground(bdc, self.width, self.height)
        title_y = 2
        for ac_id in self.plots:
            for message in self.plots[ac_id]:
                for field in self.plots[ac_id][message]:
                    plot = self.plots[ac_id][message][field]
                    # The X-axis curve supplies coordinates, not a trace.
                    if (self.x_axis is not None) and (self.x_axis.id == plot.id):
                        continue
                    title_height = plot.DrawTitle(bdc, 2, self.width, title_y)
                    plot.DrawCurve(bdc, self.width, self.height, self.margin, self.max, self.min, self.x_axis)
                    title_y += title_height + 2
    def DrawBackground(self, dc, width, height):
        """Draw the axis labels: three X labels (time offsets, or the
        bound X-axis curve's range) and evenly spaced Y tick labels."""
        # Time Graduations
        dc.SetFont(self.font)
        if self.x_axis is None:
            # Default X axis is elapsed time: one sample per pixel at
            # plot_interval milliseconds per sample.
            t = self.plot_interval * width
            t1 = "0.0s"
            t2 = "-%.1fs" % (t / 2000.0)
            t3 = "-%.1fs" % (t / 1000.0)
        else:
            x_max = self.x_axis.x_max
            x_min = self.x_axis.x_min
            t1 = "%.2f" % x_max
            t2 = "%.2f" % (x_min + (x_max - x_min) / 2.0)
            t3 = "%.2f" % x_min
        (w, h) = dc.GetTextExtent(t1)
        dc.DrawText(t1, width - w, height - h)
        # (w,h) = dc.GetTextExtent(t2) #save time since h will be the same
        dc.DrawText(t2, width / 2, height - h)
        # (w,h) = dc.GetTextExtent(t3) #save time since h will be the same
        dc.DrawText(t3, 0, height - h)
        # Y graduations
        if self.max == -1e32:
            # No data seen yet (bounds still at their sentinel values).
            return
        (_min_, _max_) = (self.min, self.max)
        if _max_ < _min_: # prevent divide by zero or inversion
            (_min_, _max_) = (-1, 1)
        if _max_ == _min_:
            (_min_, _max_) = (_max_ - 0.5, _max_ + 0.5)
        delta = _max_ - _min_
        dy = (height - self.margin * 2) / delta
        # Pick a "round" tick unit near one order of magnitude below delta.
        scale = math.log10(delta)
        d = math.pow(10.0, math.floor(scale))
        u = d
        if delta < 2 * d:
            u = d / 5
        elif delta < 5 * d:
            u = d / 2
        tick_min = _min_ - math.fmod(_min_, u)
        for i in range(int(delta / u) + 1):
            tick = tick_min + float(i) * u
            s = str(tick)
            (w, h) = dc.GetTextExtent(s)
            y = height - self.margin - int((tick - _min_) * dy) - h / 2
            dc.DrawText(s, 0, y)
| gpl-2.0 |
slevenhagen/odoo-npg | openerp/report/render/rml2html/rml2html.py | 438 | 15438 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2005, Fabien Pinckaers, UCL, FSA
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
##############################################################################
import sys
import cStringIO
from lxml import etree
import copy
from openerp.report.render.rml2pdf import utils
class _flowable(object):
    """Render the flowable ('story') part of an RML document to HTML,
    dispatching on the RML tag name through ``self._tags``."""
    def __init__(self, template, doc, localcontext = None):
        # RML tag name -> handler returning an HTML fragment.
        self._tags = {
            'title': self._tag_title,
            'spacer': self._tag_spacer,
            'para': self._tag_para,
            'section':self._section,
            'nextFrame': self._tag_next_frame,
            'blockTable': self._tag_table,
            'pageBreak': self._tag_page_break,
            'setNextTemplate': self._tag_next_template,
        }
        self.template = template
        self.doc = doc
        self.localcontext = localcontext
        self._cache = {}
    def _tag_page_break(self, node):
        # HTML has no hard page break; approximate with blank lines.
        return '<br/>'*3
    def _tag_next_template(self, node):
        return ''
    def _tag_next_frame(self, node):
        # Close the current frame and open the next one of the template.
        result=self.template.frame_stop()
        result+='<br/>'
        result+=self.template.frame_start()
        return result
    def _tag_title(self, node):
        node.tag='h1'
        return etree.tostring(node)
    def _tag_spacer(self, node):
        # Convert the RML length into an equivalent number of blank lines.
        length = 1+int(utils.unit_get(node.get('length')))/35
        return "<br/>"*length
    def _tag_table(self, node):
        new_node = copy.deepcopy(node)
        for child in new_node:
            new_node.remove(child)
        new_node.tag = 'table'
        def process(node,new_node):
            # Recursively mirror the RML table tree as HTML; leaf cells
            # become <p>, header-styled cells promote the row to <th>.
            for child in utils._child_get(node,self):
                new_child = copy.deepcopy(child)
                new_node.append(new_child)
                if len(child):
                    for n in new_child:
                        new_child.remove(n)
                    process(child, new_child)
                else:
                    new_child.text = utils._process_text(self, child.text)
                    new_child.tag = 'p'
                    try:
                        if new_child.get('style').find('terp_tblheader')!= -1:
                            new_node.tag = 'th'
                    except Exception:
                        pass
        process(node,new_node)
        if new_node.get('colWidths',False):
            # Emit a row of fixed-width cells to force column widths.
            sizes = map(lambda x: utils.unit_get(x), new_node.get('colWidths').split(','))
            tr = etree.SubElement(new_node, 'tr')
            for s in sizes:
                etree.SubElement(tr, 'td', width=str(s))
        return etree.tostring(new_node)
    def _tag_para(self, node):
        new_node = copy.deepcopy(node)
        new_node.tag = 'p'
        if new_node.attrib.get('style',False):
            # The RML style name doubles as the CSS class name.
            new_node.set('class', new_node.get('style'))
        new_node.text = utils._process_text(self, node.text)
        return etree.tostring(new_node)
    def _section(self, node):
        result = ''
        for child in utils._child_get(node, self):
            if child.tag in self._tags:
                result += self._tags[child.tag](child)
        return result
    def render(self, node):
        """Render *node* (a story) inside the template's frames and
        return the resulting HTML as a UTF-8 byte string."""
        result = self.template.start()
        result += self.template.frame_start()
        for n in utils._child_get(node, self):
            if n.tag in self._tags:
                result += self._tags[n.tag](n)
            else:
                pass
        result += self.template.frame_stop()
        result += self.template.end()
        return result.encode('utf-8').replace('"',"\'").replace('°','&deg;')
class _rml_tmpl_tag(object):
def __init__(self, *args):
pass
def tag_start(self):
return ''
def tag_end(self):
return False
def tag_stop(self):
return ''
def tag_mergeable(self):
return True
class _rml_tmpl_frame(_rml_tmpl_tag):
def __init__(self, posx, width):
self.width = width
self.posx = posx
def tag_start(self):
return "<table border=\'0\' width=\'%d\'><tr><td width=\'%d\'> </td><td>" % (self.width+self.posx,self.posx)
def tag_end(self):
return True
def tag_stop(self):
return '</td></tr></table><br/>'
def tag_mergeable(self):
return False
def merge(self, frame):
pass
class _rml_tmpl_draw_string(_rml_tmpl_tag):
    """Render drawString/drawRightString/drawCentredString instructions
    as one row of absolutely positioned table cells."""
    def __init__(self, node, style,localcontext = {}):
        self.localcontext = localcontext
        self.posx = utils.unit_get(node.get('x'))
        self.posy = utils.unit_get(node.get('y'))
        # The RML tag name encodes the text alignment.
        aligns = {
            'drawString': 'left',
            'drawRightString': 'right',
            'drawCentredString': 'center'
        }
        align = aligns[node.tag]
        self.pos = [(self.posx, self.posy, align, utils._process_text(self, node.text), style.get('td'), style.font_size_get('td'))]
    def tag_start(self):
        # Emit the collected strings left-to-right, padding the gaps with
        # empty fixed-width cells.
        self.pos.sort()
        res = "<table border='0' cellpadding='0' cellspacing='0'><tr>"
        posx = 0
        i = 0
        for (x,y,align,txt, style, fs) in self.pos:
            if align=="left":
                # Cell width is estimated from glyph count * font size.
                pos2 = len(txt)*fs
                res+="<td width=\'%d\'></td><td style=\'%s\' width=\'%d\'>%s</td>" % (x - posx, style, pos2, txt)
                posx = x+pos2
            if align=="right":
                res+="<td width=\'%d\' align=\'right\' style=\'%s\'>%s</td>" % (x - posx, style, txt)
                posx = x
            if align=="center":
                res+="<td width=\'%d\' align=\'center\' style=\'%s\'>%s</td>" % ((x - posx)*2, style, txt)
                posx = 2*x-posx
            i+=1
        res+='</tr></table>'
        return res
    def merge(self, ds):
        # Strings sharing a baseline are folded into a single row.
        self.pos+=ds.pos
class _rml_tmpl_draw_lines(_rml_tmpl_tag):
    """Render a `lines` drawing instruction as an <hr>.

    Only horizontal segments can be represented in HTML; vertical or
    oblique segments produce no output (``self.ok`` stays False).
    """
    def __init__(self, node, style, localcontext = {}):
        self.localcontext = localcontext
        coord = [utils.unit_get(x) for x in utils._process_text(self, node.text).split(' ')]
        self.ok = False
        self.posx = coord[0]
        self.posy = coord[1]
        self.width = coord[2]-coord[0]
        # Horizontal iff both endpoints share the same y coordinate.
        self.ok = coord[1]==coord[3]
        # Keep only the <hr> CSS fragment; the full style object is not
        # needed afterwards (a dead intermediate assignment was removed).
        self.style = style.get('hr')
    def tag_start(self):
        if self.ok:
            return "<table border=\'0\' cellpadding=\'0\' cellspacing=\'0\' width=\'%d\'><tr><td width=\'%d\'></td><td><hr width=\'100%%\' style=\'margin:0px; %s\'></td></tr></table>" % (self.posx+self.width,self.posx,self.style)
        else:
            return ''
class _rml_stylesheet(object):
    """Translate RML <paraStyle> definitions into CSS rules for <p>
    elements; the CSS text is built eagerly in the constructor."""
    def __init__(self, localcontext, stylesheet, doc):
        self.doc = doc
        self.localcontext = localcontext
        self.attrs = {}
        # Supported RML attributes and their (css-property, value) makers.
        self._tags = {
            'fontSize': lambda x: ('font-size',str(utils.unit_get(x)+5.0)+'px'),
            'alignment': lambda x: ('text-align',str(x))
        }
        result = ''
        for ps in stylesheet.findall('paraStyle'):
            attr = {}
            attrs = ps.attrib
            for key, val in attrs.items():
                attr[key] = val
            attrs = []
            for a in attr:
                if a in self._tags:
                    attrs.append('%s:%s' % self._tags[a](attr[a]))
            if len(attrs):
                # The RML style name becomes a CSS class on <p>.
                result += 'p.'+attr['name']+' {'+'; '.join(attrs)+'}\n'
        self.result = result
    def render(self):
        """Return the CSS text built by the constructor."""
        return self.result
class _rml_draw_style(object):
def __init__(self):
self.style = {}
self._styles = {
'fill': lambda x: {'td': {'color':x.get('color')}},
'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}},
'stroke': lambda x: {'hr': {'color':x.get('color')}},
}
def update(self, node):
if node.tag in self._styles:
result = self._styles[node.tag](node)
for key in result:
if key in self.style:
self.style[key].update(result[key])
else:
self.style[key] = result[key]
def font_size_get(self,tag):
size = utils.unit_get(self.style.get('td', {}).get('font-size','16'))
return size
def get(self,tag):
if not tag in self.style:
return ""
return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()])
class _rml_template(object):
    """Translate RML <pageTemplate> definitions into ordered lists of
    frame/graphics tags and drive frame-by-frame HTML rendering."""
    def __init__(self, template, localcontext=None):
        self.frame_pos = -1
        self.localcontext = localcontext
        self.frames = []
        self.template_order = []
        self.page_template = {}
        self.loop = 0
        # Page-graphics tags that produce markup of their own.
        self._tags = {
            'drawString': _rml_tmpl_draw_string,
            'drawRightString': _rml_tmpl_draw_string,
            'drawCentredString': _rml_tmpl_draw_string,
            'lines': _rml_tmpl_draw_lines
        }
        self.style = _rml_draw_style()
        rc = 'data:image/png;base64,'
        self.data = ''
        for pt in template.findall('pageTemplate'):
            frames = {}
            id = pt.get('id')
            self.template_order.append(id)
            for tmpl in pt.findall('frame'):
                posy = int(utils.unit_get(tmpl.get('y1')))
                posx = int(utils.unit_get(tmpl.get('x1')))
                frames[(posy,posx,tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
            for tmpl in pt.findall('pageGraphics'):
                for n in tmpl:
                    if n.tag == 'image':
                        # Inline the page image as a data: URI.
                        self.data = rc + utils._process_text(self, n.text)
                    if n.tag in self._tags:
                        t = self._tags[n.tag](n, self.style,self.localcontext)
                        frames[(t.posy,t.posx,n.tag)] = t
                    else:
                        # Anything else is drawing state (fill/stroke/font).
                        self.style.update(n)
            # Render top-to-bottom: sort by (y, x) descending.
            keys = frames.keys()
            keys.sort()
            keys.reverse()
            self.page_template[id] = []
            for key in range(len(keys)):
                # Merge adjacent mergeable tags sharing the same baseline.
                if key>0 and keys[key-1][0] == keys[key][0]:
                    if type(self.page_template[id][-1]) == type(frames[keys[key]]):
                        if self.page_template[id][-1].tag_mergeable():
                            self.page_template[id][-1].merge(frames[keys[key]])
                            continue
                self.page_template[id].append(frames[keys[key]])
        self.template = self.template_order[0]
    def _get_style(self):
        return self.style
    def set_next_template(self):
        """Advance to the next page template, wrapping around.

        Bug fix: the original referenced an undefined name ``name`` and
        applied ``%`` to the template list itself instead of its length,
        so any call raised at runtime.
        """
        next_index = (self.template_order.index(self.template) + 1) % len(self.template_order)
        self.template = self.template_order[next_index]
        self.frame_pos = -1
    def set_template(self, name):
        """Switch to the page template called *name*."""
        self.template = name
        self.frame_pos = -1
    def frame_start(self):
        """Open the next frame of the current template, emitting the markup
        of any non-frame tags passed on the way; sets ``self.loop`` once
        all frames of the page have been consumed."""
        result = ''
        frames = self.page_template[self.template]
        ok = True
        while ok:
            self.frame_pos += 1
            if self.frame_pos>=len(frames):
                self.frame_pos=0
                self.loop=1
                ok = False
                continue
            f = frames[self.frame_pos]
            result+=f.tag_start()
            ok = not f.tag_end()
            if ok:
                # Self-closing tag: emit its stop markup and keep going.
                result+=f.tag_stop()
        return result
    def frame_stop(self):
        """Close the currently open frame."""
        frames = self.page_template[self.template]
        f = frames[self.frame_pos]
        result=f.tag_stop()
        return result
    def start(self):
        return ''
    def end(self):
        """Flush the remaining frames of the current page."""
        result = ''
        while not self.loop:
            result += self.frame_start()
            result += self.frame_stop()
        return result
class _rml_doc(object):
    """Top-level RML-to-HTML renderer: parses the RML source and writes a
    complete HTML page (styles, stories, prev/next navigation)."""
    def __init__(self, data, localcontext):
        self.dom = etree.XML(data)
        self.localcontext = localcontext
        self.filename = self.dom.get('filename')
        self.result = ''
    def render(self, out):
        """Build the HTML document into self.result and write it to the
        file-like object *out*."""
        self.result += '''<!DOCTYPE HTML PUBLIC "-//w3c//DTD HTML 4.0 Frameset//EN">
<html>
<head>
    <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
    <style type="text/css">
        p {margin:0px; font-size:12px;}
        td {font-size:14px;}
'''
        # Append CSS derived from the RML stylesheet.
        style = self.dom.findall('stylesheet')[0]
        s = _rml_stylesheet(self.localcontext, style, self.dom)
        self.result += s.render()
        self.result+='''
</style>
'''
        # Render every story; each becomes one entry of the JS array used
        # by the prev/next buttons below.
        list_story =[]
        for story in utils._child_get(self.dom, self, 'story'):
            template = _rml_template(self.dom.findall('template')[0], self.localcontext)
            f = _flowable(template, self.dom, localcontext = self.localcontext)
            story_text = f.render(story)
            list_story.append(story_text)
            del f
        if template.data:
            tag = '''<img src = '%s' width=80 height=72/>'''% template.data
        else:
            tag = ''
        self.result +='''
            <script type="text/javascript">
            var indexer = 0;
            var aryTest = %s ;
            function nextData()
            {
                if(indexer < aryTest.length -1)
                {
                    indexer += 1;
                    document.getElementById("tiny_data").innerHTML=aryTest[indexer];
                }
            }
            function prevData()
            {
                if (indexer > 0)
                {
                    indexer -= 1;
                    document.getElementById("tiny_data").innerHTML=aryTest[indexer];
                }
            }
        </script>
        </head>
        <body>
            %s
            <div id="tiny_data">
                %s
            </div>
            <br>
            <input type="button" value="next" onclick="nextData();">
            <input type="button" value="prev" onclick="prevData();">
        </body></html>'''%(list_story,tag,list_story[0])
        out.write( self.result)
def parseString(data, localcontext=None, fout=None):
    """Render RML *data* (a string) to HTML.

    :param data: the RML document source.
    :param localcontext: optional evaluation context for expressions in
        the document; a fresh dict is used per call when omitted.
    :param fout: optional output filename; when given the HTML is written
        there and the filename is returned, otherwise the HTML string
        itself is returned.
    """
    # Bug fix: the mutable default argument ({}) was shared across calls;
    # build a fresh dict per call instead.
    if localcontext is None:
        localcontext = {}
    r = _rml_doc(data, localcontext)
    if fout:
        fp = file(fout,'wb')
        r.render(fp)
        fp.close()
        return fout
    else:
        fp = cStringIO.StringIO()
        r.render(fp)
        return fp.getvalue()
def rml2html_help():
    """Print command-line usage and exit with status 0."""
    print 'Usage: rml2html input.rml >output.html'
    print 'Render the standard input (RML) and output an HTML file'
    sys.exit(0)
if __name__=="__main__":
    # Simple CLI: render the RML file named in argv[1] to HTML on stdout;
    # '--help' prints usage instead.
    if len(sys.argv)>1:
        if sys.argv[1]=='--help':
            rml2html_help()
        print parseString(file(sys.argv[1], 'r').read()),
    else:
        print 'Usage: rml2html input.rml >output.html'
        print 'Try \'rml2html --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rosudrag/Freemium-winner | VirtualEnvironment/Lib/site-packages/sqlalchemy/testing/suite/test_types.py | 147 | 17088 | # coding: utf-8
from .. import fixtures, config
from ..assertions import eq_
from ..config import requirements
from sqlalchemy import Integer, Unicode, UnicodeText, select
from sqlalchemy import Date, DateTime, Time, MetaData, String, \
Text, Numeric, Float, literal, Boolean
from ..schema import Table, Column
from ... import testing
import decimal
import datetime
from ...util import u
from ... import util
class _LiteralRoundTripFixture(object):
    """Mixin verifying that a type's literal SQL rendering survives an
    INSERT / SELECT round trip on the target backend."""
    @testing.provide_metadata
    def _literal_round_trip(self, type_, input_, output, filter_=None):
        """test literal rendering """
        # for literal, we test the literal render in an INSERT
        # into a typed column. we can then SELECT it back as its
        # official type; ideally we'd be able to use CAST here
        # but MySQL in particular can't CAST fully
        t = Table('t', self.metadata, Column('x', type_))
        t.create()
        for value in input_:
            # literal_binds forces the value to be rendered inline as SQL
            # text rather than passed as a bound parameter.
            ins = t.insert().values(x=literal(value)).compile(
                dialect=testing.db.dialect,
                compile_kwargs=dict(literal_binds=True)
            )
            testing.db.execute(ins)
        for row in t.select().execute():
            value = row[0]
            if filter_ is not None:
                value = filter_(value)
            assert value in output
class _UnicodeFixture(_LiteralRoundTripFixture):
    """Mixin round-tripping a non-ASCII string; concrete subclasses pick
    the ``datatype`` (Unicode or UnicodeText)."""
    __requires__ = 'unicode_data',
    data = u("Alors vous imaginez ma surprise, au lever du jour, "
             "quand une drôle de petite voix m’a réveillé. Elle "
             "disait: « S’il vous plaît… dessine-moi un mouton! »")
    @classmethod
    def define_tables(cls, metadata):
        Table('unicode_table', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('unicode_data', cls.datatype),
              )
    def test_round_trip(self):
        unicode_table = self.tables.unicode_table
        config.db.execute(
            unicode_table.insert(),
            {
                'unicode_data': self.data,
            }
        )
        row = config.db.execute(
            select([
                unicode_table.c.unicode_data,
            ])
        ).first()
        eq_(
            row,
            (self.data, )
        )
        # The DBAPI must hand back text, not bytes.
        assert isinstance(row[0], util.text_type)
    def test_round_trip_executemany(self):
        unicode_table = self.tables.unicode_table
        config.db.execute(
            unicode_table.insert(),
            [
                {
                    'unicode_data': self.data,
                }
                for i in range(3)
            ]
        )
        rows = config.db.execute(
            select([
                unicode_table.c.unicode_data,
            ])
        ).fetchall()
        eq_(
            rows,
            [(self.data, ) for i in range(3)]
        )
        for row in rows:
            assert isinstance(row[0], util.text_type)
    def _test_empty_strings(self):
        # Shared helper; invoked by subclasses behind per-backend
        # requirements (some backends conflate '' with NULL).
        unicode_table = self.tables.unicode_table
        config.db.execute(
            unicode_table.insert(),
            {"unicode_data": u('')}
        )
        row = config.db.execute(
            select([unicode_table.c.unicode_data])
        ).first()
        eq_(row, (u(''),))
    def test_literal(self):
        self._literal_round_trip(self.datatype, [self.data], [self.data])
class UnicodeVarcharTest(_UnicodeFixture, fixtures.TablesTest):
    """Unicode round trip against a bounded VARCHAR column."""
    __requires__ = 'unicode_data',
    __backend__ = True
    datatype = Unicode(255)
    @requirements.empty_strings_varchar
    def test_empty_strings_varchar(self):
        self._test_empty_strings()
class UnicodeTextTest(_UnicodeFixture, fixtures.TablesTest):
    """Unicode round trip against an unbounded TEXT/CLOB column."""
    __requires__ = 'unicode_data', 'text_type'
    __backend__ = True
    datatype = UnicodeText()
    @requirements.empty_strings_text
    def test_empty_strings_text(self):
        self._test_empty_strings()
class TextTest(_LiteralRoundTripFixture, fixtures.TablesTest):
    """Round-trip and literal-rendering tests for the Text type."""
    __requires__ = 'text_type',
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table('text_table', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('text_data', Text),
              )
    def test_text_roundtrip(self):
        text_table = self.tables.text_table
        config.db.execute(
            text_table.insert(),
            {"text_data": 'some text'}
        )
        row = config.db.execute(
            select([text_table.c.text_data])
        ).first()
        eq_(row, ('some text',))
    def test_text_empty_strings(self):
        # Pins that '' is stored and retrieved as '' (not NULL).
        text_table = self.tables.text_table
        config.db.execute(
            text_table.insert(),
            {"text_data": ''}
        )
        row = config.db.execute(
            select([text_table.c.text_data])
        ).first()
        eq_(row, ('',))
    def test_literal(self):
        self._literal_round_trip(Text, ["some text"], ["some text"])
    def test_literal_quoting(self):
        # Embedded quotes must be escaped by the literal renderer.
        data = '''some 'text' hey "hi there" that's text'''
        self._literal_round_trip(Text, [data], [data])
    def test_literal_backslashes(self):
        data = r'backslash one \ backslash two \\ end'
        self._literal_round_trip(Text, [data], [data])
class StringTest(_LiteralRoundTripFixture, fixtures.TestBase):
    """Literal-rendering tests for VARCHAR, plus unbounded-length DDL."""
    __backend__ = True
    @requirements.unbounded_varchar
    def test_nolength_string(self):
        # A String with no length must still produce valid DDL on
        # backends that support unbounded VARCHAR.
        metadata = MetaData()
        foo = Table('foo', metadata,
                    Column('one', String)
                    )
        foo.create(config.db)
        foo.drop(config.db)
    def test_literal(self):
        self._literal_round_trip(String(40), ["some text"], ["some text"])
    def test_literal_quoting(self):
        data = '''some 'text' hey "hi there" that's text'''
        self._literal_round_trip(String(40), [data], [data])
    def test_literal_backslashes(self):
        data = r'backslash one \ backslash two \\ end'
        self._literal_round_trip(String(40), [data], [data])
class _DateFixture(_LiteralRoundTripFixture):
    """Mixin exercising date/time round trips; subclasses set
    ``datatype``, ``data`` and optionally ``compare``."""
    # Expected value read back, when it differs from the inserted 'data'
    # (e.g. a datetime coerced down to a date); None means "same".
    compare = None
    @classmethod
    def define_tables(cls, metadata):
        Table('date_table', metadata,
              Column('id', Integer, primary_key=True,
                     test_needs_autoincrement=True),
              Column('date_data', cls.datatype),
              )
    def test_round_trip(self):
        date_table = self.tables.date_table
        config.db.execute(
            date_table.insert(),
            {'date_data': self.data}
        )
        row = config.db.execute(
            select([
                date_table.c.date_data,
            ])
        ).first()
        compare = self.compare or self.data
        eq_(row,
            (compare, ))
        assert isinstance(row[0], type(compare))
    def test_null(self):
        date_table = self.tables.date_table
        config.db.execute(
            date_table.insert(),
            {'date_data': None}
        )
        row = config.db.execute(
            select([
                date_table.c.date_data,
            ])
        ).first()
        eq_(row, (None,))
    @testing.requires.datetime_literals
    def test_literal(self):
        compare = self.compare or self.data
        self._literal_round_trip(self.datatype, [self.data], [compare])
class DateTimeTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a second-precision datetime value."""
    __requires__ = 'datetime',
    __backend__ = True
    datatype = DateTime
    data = datetime.datetime(2012, 10, 15, 12, 57, 18)
class DateTimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a datetime carrying a microseconds component."""
    __requires__ = 'datetime_microseconds',
    __backend__ = True
    datatype = DateTime
    data = datetime.datetime(2012, 10, 15, 12, 57, 18, 396)
class TimeTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a second-precision time-of-day value."""
    __requires__ = 'time',
    __backend__ = True
    datatype = Time
    data = datetime.time(12, 57, 18)
class TimeMicrosecondsTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a time value carrying a microseconds component."""
    __requires__ = 'time_microseconds',
    __backend__ = True
    datatype = Time
    data = datetime.time(12, 57, 18, 396)
class DateTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a plain date value."""
    __requires__ = 'date',
    __backend__ = True
    datatype = Date
    data = datetime.date(2012, 10, 15)
class DateTimeCoercedToDateTimeTest(_DateFixture, fixtures.TablesTest):
    """Insert a datetime into a Date column and expect the date back."""
    __requires__ = 'date', 'date_coerces_from_datetime'
    __backend__ = True
    datatype = Date
    data = datetime.datetime(2012, 10, 15, 12, 57, 18)
    compare = datetime.date(2012, 10, 15)
class DateTimeHistoricTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a datetime earlier than the Unix epoch."""
    __requires__ = 'datetime_historic',
    __backend__ = True
    datatype = DateTime
    data = datetime.datetime(1850, 11, 10, 11, 52, 35)
class DateHistoricTest(_DateFixture, fixtures.TablesTest):
    """Round-trip a date earlier than the Unix epoch."""
    __requires__ = 'date_historic',
    __backend__ = True
    datatype = Date
    data = datetime.date(1727, 4, 1)
class IntegerTest(_LiteralRoundTripFixture, fixtures.TestBase):
    """Literal-rendering round trip for plain integers."""
    __backend__ = True
    def test_literal(self):
        self._literal_round_trip(Integer, [5], [5])
class NumericTest(_LiteralRoundTripFixture, fixtures.TestBase):
    """Round-trip tests for Numeric/Float: precision, scale, E-notation
    extremes and Decimal-vs-float coercion."""
    __backend__ = True
    @testing.emits_warning(r".*does \*not\* support Decimal objects natively")
    @testing.provide_metadata
    def _do_test(self, type_, input_, output,
                 filter_=None, check_scale=False):
        # Insert every input value and compare the distinct set of values
        # read back; check_scale additionally compares string forms so
        # trailing zeros / scale are pinned.
        metadata = self.metadata
        t = Table('t', metadata, Column('x', type_))
        t.create()
        t.insert().execute([{'x': x} for x in input_])
        result = set([row[0] for row in t.select().execute()])
        output = set(output)
        if filter_:
            result = set(filter_(x) for x in result)
            output = set(filter_(x) for x in output)
        eq_(result, output)
        if check_scale:
            eq_(
                [str(x) for x in result],
                [str(x) for x in output],
            )
    @testing.emits_warning(r".*does \*not\* support Decimal objects natively")
    def test_render_literal_numeric(self):
        self._literal_round_trip(
            Numeric(precision=8, scale=4),
            [15.7563, decimal.Decimal("15.7563")],
            [decimal.Decimal("15.7563")],
        )
    @testing.emits_warning(r".*does \*not\* support Decimal objects natively")
    def test_render_literal_numeric_asfloat(self):
        self._literal_round_trip(
            Numeric(precision=8, scale=4, asdecimal=False),
            [15.7563, decimal.Decimal("15.7563")],
            [15.7563],
        )
    def test_render_literal_float(self):
        self._literal_round_trip(
            Float(4),
            [15.7563, decimal.Decimal("15.7563")],
            [15.7563, ],
            # round() tolerates backend float representation drift.
            filter_=lambda n: n is not None and round(n, 5) or None
        )
    @testing.requires.precision_generic_float_type
    def test_float_custom_scale(self):
        self._do_test(
            Float(None, decimal_return_scale=7, asdecimal=True),
            [15.7563827, decimal.Decimal("15.7563827")],
            [decimal.Decimal("15.7563827"), ],
            check_scale=True
        )
    def test_numeric_as_decimal(self):
        self._do_test(
            Numeric(precision=8, scale=4),
            [15.7563, decimal.Decimal("15.7563")],
            [decimal.Decimal("15.7563")],
        )
    def test_numeric_as_float(self):
        self._do_test(
            Numeric(precision=8, scale=4, asdecimal=False),
            [15.7563, decimal.Decimal("15.7563")],
            [15.7563],
        )
    @testing.requires.fetch_null_from_numeric
    def test_numeric_null_as_decimal(self):
        self._do_test(
            Numeric(precision=8, scale=4),
            [None],
            [None],
        )
    @testing.requires.fetch_null_from_numeric
    def test_numeric_null_as_float(self):
        self._do_test(
            Numeric(precision=8, scale=4, asdecimal=False),
            [None],
            [None],
        )
    @testing.requires.floats_to_four_decimals
    def test_float_as_decimal(self):
        self._do_test(
            Float(precision=8, asdecimal=True),
            [15.7563, decimal.Decimal("15.7563"), None],
            [decimal.Decimal("15.7563"), None],
        )
    def test_float_as_float(self):
        self._do_test(
            Float(precision=8),
            [15.7563, decimal.Decimal("15.7563")],
            [15.7563],
            filter_=lambda n: n is not None and round(n, 5) or None
        )
    @testing.requires.precision_numerics_general
    def test_precision_decimal(self):
        numbers = set([
            decimal.Decimal("54.234246451650"),
            decimal.Decimal("0.004354"),
            decimal.Decimal("900.0"),
        ])
        self._do_test(
            Numeric(precision=18, scale=12),
            numbers,
            numbers,
        )
    @testing.requires.precision_numerics_enotation_large
    def test_enotation_decimal(self):
        """test exceedingly small decimals.
        Decimal reports values with E notation when the exponent
        is greater than 6.
        """
        numbers = set([
            decimal.Decimal('1E-2'),
            decimal.Decimal('1E-3'),
            decimal.Decimal('1E-4'),
            decimal.Decimal('1E-5'),
            decimal.Decimal('1E-6'),
            decimal.Decimal('1E-7'),
            decimal.Decimal('1E-8'),
            decimal.Decimal("0.01000005940696"),
            decimal.Decimal("0.00000005940696"),
            decimal.Decimal("0.00000000000696"),
            decimal.Decimal("0.70000000000696"),
            decimal.Decimal("696E-12"),
        ])
        self._do_test(
            Numeric(precision=18, scale=14),
            numbers,
            numbers
        )
    @testing.requires.precision_numerics_enotation_large
    def test_enotation_decimal_large(self):
        """test exceedingly large decimals.
        """
        numbers = set([
            decimal.Decimal('4E+8'),
            decimal.Decimal("5748E+15"),
            decimal.Decimal('1.521E+15'),
            decimal.Decimal('00000000000000.1E+12'),
        ])
        self._do_test(
            Numeric(precision=25, scale=2),
            numbers,
            numbers
        )
    @testing.requires.precision_numerics_many_significant_digits
    def test_many_significant_digits(self):
        numbers = set([
            decimal.Decimal("31943874831932418390.01"),
            decimal.Decimal("319438950232418390.273596"),
            decimal.Decimal("87673.594069654243"),
        ])
        self._do_test(
            Numeric(precision=38, scale=12),
            numbers,
            numbers
        )
    @testing.requires.precision_numerics_retains_significant_digits
    def test_numeric_no_decimal(self):
        # check_scale pins that "1.000" keeps its trailing zeros.
        numbers = set([
            decimal.Decimal("1.000")
        ])
        self._do_test(
            Numeric(precision=5, scale=3),
            numbers,
            numbers,
            check_scale=True
        )
class BooleanTest(_LiteralRoundTripFixture, fixtures.TablesTest):
    """Round-trip True/False/NULL through constrained and unconstrained
    Boolean columns."""
    __backend__ = True
    @classmethod
    def define_tables(cls, metadata):
        Table('boolean_table', metadata,
              Column('id', Integer, primary_key=True, autoincrement=False),
              Column('value', Boolean),
              Column('unconstrained_value', Boolean(create_constraint=False)),
              )
    def test_render_literal_bool(self):
        self._literal_round_trip(
            Boolean(),
            [True, False],
            [True, False]
        )
    def test_round_trip(self):
        boolean_table = self.tables.boolean_table
        config.db.execute(
            boolean_table.insert(),
            {
                'id': 1,
                'value': True,
                'unconstrained_value': False
            }
        )
        row = config.db.execute(
            select([
                boolean_table.c.value,
                boolean_table.c.unconstrained_value
            ])
        ).first()
        eq_(
            row,
            (True, False)
        )
        # The DBAPI must return a real bool, not an int 0/1.
        assert isinstance(row[0], bool)
    def test_null(self):
        boolean_table = self.tables.boolean_table
        config.db.execute(
            boolean_table.insert(),
            {
                'id': 1,
                'value': None,
                'unconstrained_value': None
            }
        )
        row = config.db.execute(
            select([
                boolean_table.c.value,
                boolean_table.c.unconstrained_value
            ])
        ).first()
        eq_(
            row,
            (None, None)
        )
# Public roster of test classes picked up by third-party dialect suites.
__all__ = ('UnicodeVarcharTest', 'UnicodeTextTest',
           'DateTest', 'DateTimeTest', 'TextTest',
           'NumericTest', 'IntegerTest',
           'DateTimeHistoricTest', 'DateTimeCoercedToDateTimeTest',
           'TimeMicrosecondsTest', 'TimeTest', 'DateTimeMicrosecondsTest',
           'DateHistoricTest', 'StringTest', 'BooleanTest')
| mit |
sugarguo/Flask_Blog | ext_lib/setuptools/command/develop.py | 477 | 6447 | from setuptools.command.easy_install import easy_install
from distutils.util import convert_path, subst_vars
from pkg_resources import Distribution, PathMetadata, normalize_path
from distutils import log
from distutils.errors import DistutilsError, DistutilsOptionError
import os, sys, setuptools, glob
class develop(easy_install):
    """Set up package for development"""
    description = "install package in 'development mode'"
    user_options = easy_install.user_options + [
        ("uninstall", "u", "Uninstall this source package"),
        ("egg-path=", None, "Set the path to be used in the .egg-link file"),
    ]
    boolean_options = easy_install.boolean_options + ['uninstall']
    command_consumes_arguments = False # override base
    def run(self):
        # 'develop -u' removes the link; otherwise install in-place.
        if self.uninstall:
            self.multi_version = True
            self.uninstall_link()
        else:
            self.install_for_development()
        self.warn_deprecated_options()
    def initialize_options(self):
        self.uninstall = None
        self.egg_path = None
        easy_install.initialize_options(self)
        self.setup_path = None
        self.always_copy_from = '.' # always copy eggs installed in curdir
    def finalize_options(self):
        # Derive egg name/paths from the egg_info command and validate
        # that egg_path really points back at the project directory.
        ei = self.get_finalized_command("egg_info")
        if ei.broken_egg_info:
            raise DistutilsError(
                "Please rename %r to %r before using 'develop'"
                % (ei.egg_info, ei.broken_egg_info)
            )
        self.args = [ei.egg_name]
        easy_install.finalize_options(self)
        self.expand_basedirs()
        self.expand_dirs()
        # pick up setup-dir .egg files only: no .egg-info
        self.package_index.scan(glob.glob('*.egg'))
        self.egg_link = os.path.join(self.install_dir, ei.egg_name+'.egg-link')
        self.egg_base = ei.egg_base
        if self.egg_path is None:
            self.egg_path = os.path.abspath(ei.egg_base)
        target = normalize_path(self.egg_base)
        if normalize_path(os.path.join(self.install_dir, self.egg_path)) != target:
            raise DistutilsOptionError(
                "--egg-path must be a relative path from the install"
                " directory to "+target
            )
        # Make a distribution for the package's source
        self.dist = Distribution(
            target,
            PathMetadata(target, os.path.abspath(ei.egg_info)),
            project_name = ei.egg_name
        )
        # Compute the relative path from the .egg-link location back to
        # the directory containing setup.py; stored in the link file.
        p = self.egg_base.replace(os.sep,'/')
        if p!= os.curdir:
            p = '../' * (p.count('/')+1)
        self.setup_path = p
        p = normalize_path(os.path.join(self.install_dir, self.egg_path, p))
        if p != normalize_path(os.curdir):
            raise DistutilsOptionError(
                "Can't get a consistent path to setup script from"
                " installation directory", p, normalize_path(os.curdir))
    def install_for_development(self):
        if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
            # If we run 2to3 we can not do this inplace:
            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)
            # Build extensions
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')
            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
            # Fixup egg-link and easy-install.pth
            ei_cmd = self.get_finalized_command("egg_info")
            self.egg_path = build_path
            self.dist.location = build_path
            self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info) # XXX
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')
            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')
        self.install_site_py() # ensure that target dir is site-safe
        if setuptools.bootstrap_install_from:
            self.easy_install(setuptools.bootstrap_install_from)
            setuptools.bootstrap_install_from = None
        # create an .egg-link in the installation dir, pointing to our egg
        log.info("Creating %s (link to %s)", self.egg_link, self.egg_base)
        if not self.dry_run:
            f = open(self.egg_link,"w")
            f.write(self.egg_path + "\n" + self.setup_path)
            f.close()
        # postprocess the installed distro, fixing up .pth, installing scripts,
        # and handling requirements
        self.process_distribution(None, self.dist, not self.no_deps)
    def uninstall_link(self):
        if os.path.exists(self.egg_link):
            log.info("Removing %s (link to %s)", self.egg_link, self.egg_base)
            egg_link_file = open(self.egg_link)
            contents = [line.rstrip() for line in egg_link_file]
            egg_link_file.close()
            # Refuse to remove a link that does not point at this project.
            if contents not in ([self.egg_path], [self.egg_path, self.setup_path]):
                log.warn("Link points to %s: uninstall aborted", contents)
                return
            if not self.dry_run:
                os.unlink(self.egg_link)
        if not self.dry_run:
            self.update_pth(self.dist) # remove any .pth link to us
        if self.distribution.scripts:
            # XXX should also check for entry point scripts!
            log.warn("Note: you must uninstall or replace scripts manually!")
    def install_egg_scripts(self, dist):
        if dist is not self.dist:
            # Installing a dependency, so fall back to normal behavior
            return easy_install.install_egg_scripts(self,dist)
        # create wrapper scripts in the script dir, pointing to dist.scripts
        # new-style...
        self.install_wrapper_scripts(dist)
        # ...and old-style
        for script_name in self.distribution.scripts or []:
            script_path = os.path.abspath(convert_path(script_name))
            script_name = os.path.basename(script_path)
            f = open(script_path,'rU')
            script_text = f.read()
            f.close()
            self.install_script(dist, script_name, script_text, script_path)
| gpl-3.0 |
eleonrk/SickRage | lib/unidecode/x056.py | 252 | 4615 | data = (
# ASCII transliteration table: one entry per low byte 0x00-0xff (see the
# trailing '# 0xNN' index comments), presumably covering Unicode code points
# U+5600..U+56FF given the module name x056.py -- TODO confirm.
# '[?]' marks code points with no known transliteration.
'Di ', # 0x00
'Qi ', # 0x01
'Jiao ', # 0x02
'Chong ', # 0x03
'Jiao ', # 0x04
'Kai ', # 0x05
'Tan ', # 0x06
'San ', # 0x07
'Cao ', # 0x08
'Jia ', # 0x09
'Ai ', # 0x0a
'Xiao ', # 0x0b
'Piao ', # 0x0c
'Lou ', # 0x0d
'Ga ', # 0x0e
'Gu ', # 0x0f
'Xiao ', # 0x10
'Hu ', # 0x11
'Hui ', # 0x12
'Guo ', # 0x13
'Ou ', # 0x14
'Xian ', # 0x15
'Ze ', # 0x16
'Chang ', # 0x17
'Xu ', # 0x18
'Po ', # 0x19
'De ', # 0x1a
'Ma ', # 0x1b
'Ma ', # 0x1c
'Hu ', # 0x1d
'Lei ', # 0x1e
'Du ', # 0x1f
'Ga ', # 0x20
'Tang ', # 0x21
'Ye ', # 0x22
'Beng ', # 0x23
'Ying ', # 0x24
'Saai ', # 0x25
'Jiao ', # 0x26
'Mi ', # 0x27
'Xiao ', # 0x28
'Hua ', # 0x29
'Mai ', # 0x2a
'Ran ', # 0x2b
'Zuo ', # 0x2c
'Peng ', # 0x2d
'Lao ', # 0x2e
'Xiao ', # 0x2f
'Ji ', # 0x30
'Zhu ', # 0x31
'Chao ', # 0x32
'Kui ', # 0x33
'Zui ', # 0x34
'Xiao ', # 0x35
'Si ', # 0x36
'Hao ', # 0x37
'Fu ', # 0x38
'Liao ', # 0x39
'Qiao ', # 0x3a
'Xi ', # 0x3b
'Xiu ', # 0x3c
'Tan ', # 0x3d
'Tan ', # 0x3e
'Mo ', # 0x3f
'Xun ', # 0x40
'E ', # 0x41
'Zun ', # 0x42
'Fan ', # 0x43
'Chi ', # 0x44
'Hui ', # 0x45
'Zan ', # 0x46
'Chuang ', # 0x47
'Cu ', # 0x48
'Dan ', # 0x49
'Yu ', # 0x4a
'Tun ', # 0x4b
'Cheng ', # 0x4c
'Jiao ', # 0x4d
'Ye ', # 0x4e
'Xi ', # 0x4f
'Qi ', # 0x50
'Hao ', # 0x51
'Lian ', # 0x52
'Xu ', # 0x53
'Deng ', # 0x54
'Hui ', # 0x55
'Yin ', # 0x56
'Pu ', # 0x57
'Jue ', # 0x58
'Qin ', # 0x59
'Xun ', # 0x5a
'Nie ', # 0x5b
'Lu ', # 0x5c
'Si ', # 0x5d
'Yan ', # 0x5e
'Ying ', # 0x5f
'Da ', # 0x60
'Dan ', # 0x61
'Yu ', # 0x62
'Zhou ', # 0x63
'Jin ', # 0x64
'Nong ', # 0x65
'Yue ', # 0x66
'Hui ', # 0x67
'Qi ', # 0x68
'E ', # 0x69
'Zao ', # 0x6a
'Yi ', # 0x6b
'Shi ', # 0x6c
'Jiao ', # 0x6d
'Yuan ', # 0x6e
'Ai ', # 0x6f
'Yong ', # 0x70
'Jue ', # 0x71
'Kuai ', # 0x72
'Yu ', # 0x73
'Pen ', # 0x74
'Dao ', # 0x75
'Ge ', # 0x76
'Xin ', # 0x77
'Dun ', # 0x78
'Dang ', # 0x79
'Sin ', # 0x7a
'Sai ', # 0x7b
'Pi ', # 0x7c
'Pi ', # 0x7d
'Yin ', # 0x7e
'Zui ', # 0x7f
'Ning ', # 0x80
'Di ', # 0x81
'Lan ', # 0x82
'Ta ', # 0x83
'Huo ', # 0x84
'Ru ', # 0x85
'Hao ', # 0x86
'Xia ', # 0x87
'Ya ', # 0x88
'Duo ', # 0x89
'Xi ', # 0x8a
'Chou ', # 0x8b
'Ji ', # 0x8c
'Jin ', # 0x8d
'Hao ', # 0x8e
'Ti ', # 0x8f
'Chang ', # 0x90
'[?] ', # 0x91
'[?] ', # 0x92
'Ca ', # 0x93
'Ti ', # 0x94
'Lu ', # 0x95
'Hui ', # 0x96
'Bo ', # 0x97
'You ', # 0x98
'Nie ', # 0x99
'Yin ', # 0x9a
'Hu ', # 0x9b
'Mo ', # 0x9c
'Huang ', # 0x9d
'Zhe ', # 0x9e
'Li ', # 0x9f
'Liu ', # 0xa0
'Haai ', # 0xa1
'Nang ', # 0xa2
'Xiao ', # 0xa3
'Mo ', # 0xa4
'Yan ', # 0xa5
'Li ', # 0xa6
'Lu ', # 0xa7
'Long ', # 0xa8
'Fu ', # 0xa9
'Dan ', # 0xaa
'Chen ', # 0xab
'Pin ', # 0xac
'Pi ', # 0xad
'Xiang ', # 0xae
'Huo ', # 0xaf
'Mo ', # 0xb0
'Xi ', # 0xb1
'Duo ', # 0xb2
'Ku ', # 0xb3
'Yan ', # 0xb4
'Chan ', # 0xb5
'Ying ', # 0xb6
'Rang ', # 0xb7
'Dian ', # 0xb8
'La ', # 0xb9
'Ta ', # 0xba
'Xiao ', # 0xbb
'Jiao ', # 0xbc
'Chuo ', # 0xbd
'Huan ', # 0xbe
'Huo ', # 0xbf
'Zhuan ', # 0xc0
'Nie ', # 0xc1
'Xiao ', # 0xc2
'Ca ', # 0xc3
'Li ', # 0xc4
'Chan ', # 0xc5
'Chai ', # 0xc6
'Li ', # 0xc7
'Yi ', # 0xc8
'Luo ', # 0xc9
'Nang ', # 0xca
'Zan ', # 0xcb
'Su ', # 0xcc
'Xi ', # 0xcd
'So ', # 0xce
'Jian ', # 0xcf
'Za ', # 0xd0
'Zhu ', # 0xd1
'Lan ', # 0xd2
'Nie ', # 0xd3
'Nang ', # 0xd4
'[?] ', # 0xd5
'[?] ', # 0xd6
'Wei ', # 0xd7
'Hui ', # 0xd8
'Yin ', # 0xd9
'Qiu ', # 0xda
'Si ', # 0xdb
'Nin ', # 0xdc
'Jian ', # 0xdd
'Hui ', # 0xde
'Xin ', # 0xdf
'Yin ', # 0xe0
'Nan ', # 0xe1
'Tuan ', # 0xe2
'Tuan ', # 0xe3
'Dun ', # 0xe4
'Kang ', # 0xe5
'Yuan ', # 0xe6
'Jiong ', # 0xe7
'Pian ', # 0xe8
'Yun ', # 0xe9
'Cong ', # 0xea
'Hu ', # 0xeb
'Hui ', # 0xec
'Yuan ', # 0xed
'You ', # 0xee
'Guo ', # 0xef
'Kun ', # 0xf0
'Cong ', # 0xf1
'Wei ', # 0xf2
'Tu ', # 0xf3
'Wei ', # 0xf4
'Lun ', # 0xf5
'Guo ', # 0xf6
'Qun ', # 0xf7
'Ri ', # 0xf8
'Ling ', # 0xf9
'Gu ', # 0xfa
'Guo ', # 0xfb
'Tai ', # 0xfc
'Guo ', # 0xfd
'Tu ', # 0xfe
'You ', # 0xff
)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.